Example #1
def make_reg_masks(regfile,shape):
    r = pyregion.open(regfile)

    if len(r) != 2:
        raise Exception('Exactly two box regions required')

    paths = []
    for reg in r:
        paths.append(get_region_box(reg.coord_list))

    #Always have A be the top half
    if paths[0].get_extents().ymax > paths[1].get_extents().ymax:
        pathA = paths[0]
        pathB = paths[1]
    else:
        pathA = paths[1]
        pathB = paths[0]


    print('Building skymasks')
    maskA = np.array([True if pathA.contains_point([x,y]) else False for x,y in np.ndindex(shape)])
    maskA = maskA.reshape(shape).T
    
    maskB = np.array([True if pathB.contains_point([x,y]) else False for x,y in np.ndindex(shape)])
    maskB = maskB.reshape(shape).T

    return (~maskA, ~maskB)
Example #2
    def run(self, triplet, is_sym_fc3_q=False):
        num_patom = self._primitive.get_number_of_atoms()
        if is_sym_fc3_q:
            index_exchange = np.array([[0,1,2],[1,2,0],[2,0,1],[0,2,1],[1,0,2],[2,1,0]])
            fc3_reciprocal = np.zeros(
                (num_patom, num_patom, num_patom, 3, 3, 3), dtype='complex128')
            for e, index in enumerate(index_exchange):
                self._triplet = triplet[index]
                self._fc3_reciprocal = np.zeros(
                    (num_patom, num_patom, num_patom, 3, 3, 3), dtype='complex128')
                self._real_to_reciprocal()
                for patoms in np.ndindex((num_patom, num_patom, num_patom)):
                    i,j,k = np.array(patoms)
                    ii, ji, ki = np.array(patoms)[index]
                    for cart in np.ndindex((3,3,3)):
                        l, m, n = np.array(cart)
                        li, mi, ni = np.array(cart)[index]
                        fc3_reciprocal[i,j,k,l,m,n] += self._fc3_reciprocal[ii, ji, ki, li, mi, ni] / 6
            self._fc3_reciprocal[:] = fc3_reciprocal

        else:
            self._triplet = triplet
            self._fc3_reciprocal = np.zeros(
                (num_patom, num_patom, num_patom, 3, 3, 3), dtype='complex128')
            self._real_to_reciprocal()
Example #3
File: stat.py Project: itmat/pade
def confidence_scores(raw_counts, perm_counts, num_features):
    """Return confidence scores.
    
    """
    logging.debug(("Getting confidence scores for shape {shape} with "
                   "{num_features} features").format(
            shape=np.shape(raw_counts),
            num_features=num_features))
    if np.shape(raw_counts) != np.shape(perm_counts):
        raise Exception((
                "raw_counts and perm_counts must have same shape. "
                "raw_counts is {raw} and perm_counts is {perm}").format(
                raw=np.shape(raw_counts), perm=np.shape(perm_counts)))
    
    shape = np.shape(raw_counts)
    adjusted = np.zeros(shape)
    for idx in np.ndindex(shape[:-1]):
        adjusted[idx] = adjust_num_diff(perm_counts[idx], raw_counts[idx], num_features)

    # (unpermuted counts - mean permuted counts) / unpermuted counts
    res = (raw_counts - adjusted) / raw_counts

    for idx in np.ndindex(res.shape[:-1]):
        res[idx] = ensure_scores_increase(res[idx])

    return res
Example #4
 def __init__(self, filename):
     print('Loading \"%s\" ..' % filename)
     with open(filename) as f:
         if f.readline().strip() != 'OFF': raise Exception("Invalid format")
         self.nverts, self.nfaces, _ = map(int, f.readline().split())
         self.vertices, self.faces = np.zeros((self.nverts, 3)), np.zeros((self.nfaces, 3), np.uint32)
         for i in range(self.nverts):
             self.vertices[i, :] = np.fromstring(f.readline(), sep=' ')
         for i in range(self.nfaces):
             self.faces[i, :] = np.fromstring(f.readline(), sep=' ', dtype=np.uint32)[1:]
     print('Computing face and vertex normals ..')
     v = [self.vertices[self.faces[:, i], :] for i in range(3)]
     face_normals = np.cross(v[2] - v[0], v[1] - v[0])
     face_normals /= np.linalg.norm(face_normals, axis=1)[:, None]
     self.normals = np.zeros((self.nverts, 3))
     for i, j in np.ndindex(self.faces.shape):
         self.normals[self.faces[i, j], :] += face_normals[i, :]
     self.normals /= np.linalg.norm(self.normals, axis=1)[:, None]
     print('Building adjacency matrix ..')
     self.adjacency = [set() for _ in range(self.nfaces)]
     for i, j in np.ndindex(self.faces.shape):
         e0, e1 = self.faces[i, j], self.faces[i, (j+1)%3]
         self.adjacency[e0].add(e1)
         self.adjacency[e1].add(e0)
     print('Randomly initializing fields ..')
     self.o_field = np.zeros((self.nverts, 3))
     self.p_field = np.zeros((self.nverts, 3))
     min_pos, max_pos = self.vertices.min(axis=0), self.vertices.max(axis=0)
     np.random.seed(0)
     for i in range(self.nverts):
         d, p = np.random.standard_normal(3), np.random.random(3)
         d -= np.dot(d, self.normals[i]) * self.normals[i]
         self.o_field[i] = d / np.linalg.norm(d)
         self.p_field[i] = (1-p) * min_pos + p * max_pos
Example #5
File: test_pool.py Project: wgapl/Theano
    def numpy_max_pool_nd(input, ds, ignore_border=False, mode='max'):
        '''Helper function, implementing pool_nd in pure numpy'''
        if len(input.shape) < len(ds):
            raise NotImplementedError('input should have at least %s dim,'
                                      ' shape is %s'
                                      % (str(len(ds)), str(input.shape)))
        nd = len(ds)
        si = [0] * nd
        if not ignore_border:
            for i in range(nd):
                if input.shape[-nd + i] % ds[i]:
                    si[i] += 1
        out_shp = list(input.shape[:-nd])
        for i in range(nd):
            out_shp.append(input.shape[-nd + i] // ds[i] + si[i])
        output_val = numpy.zeros(out_shp)
        func = numpy.max
        if mode == 'sum':
            func = numpy.sum
        elif mode != 'max':
            func = numpy.average

        for l in numpy.ndindex(*input.shape[:-nd]):
            for r in numpy.ndindex(*output_val.shape[-nd:]):
                patch = input[l][tuple(slice(r[i] * ds[i], (r[i] + 1) * ds[i])
                                       for i in range(nd))]
                output_val[l][r] = func(patch)
        return output_val
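For reference, a minimal usage sketch of the helper above (assuming it is reachable as a plain function and that its module imports numpy; the array values are arbitrary):

import numpy as np

x = np.arange(16, dtype=float).reshape(4, 4)
# 2x2 max pooling of a 4x4 array; each output cell is the max of one block
print(numpy_max_pool_nd(x, (2, 2)))
# [[ 5.  7.]
#  [13. 15.]]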
Example #6
def ndmeshgrid(grids, hnode=None):
    """
    Converts a list of (start, stop, n) tuples to an 'n-dimensional meshgrid'.
    In two dimensions, this would be:
    
        x = linspace(*grids[0])
        y = linspace(*grids[1])
        x,y = meshgrid(x,y)
        z = concatenate(x,y,axis=-1)
    
    or something like that. Also returns the number of locations in each direction
    as a list.
    """
    ndim = len(grids)
    grids = np.asarray(grids)
    ns = grids[:,2]
    axes = [np.linspace(*grid) for grid in grids]
    if hnode is None:
        x = np.empty(list(ns)+[ndim])
        for index in np.ndindex(*ns):
            x[index+(None,)] = [axes[i][index[i]] for i in xrange(ndim)]
        return np.atleast_2d(x.squeeze()), ns            
    else:
        for index in np.ndindex(*ns):
            hnode[index] = [axes[i][index[i]] for i in xrange(ndim)]
        return ns
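To make the docstring's two-dimensional description concrete, a small standalone sketch of the same idea (variable names here are illustrative, not from the original):

import numpy as np

grids = [(0.0, 1.0, 3), (0.0, 1.0, 2)]        # one (start, stop, n) tuple per axis
axes = [np.linspace(*g) for g in grids]
coords = np.empty((3, 2, 2))                  # shape = ns + [ndim]
for i, j in np.ndindex(3, 2):
    coords[i, j] = (axes[0][i], axes[1][j])   # coordinates of grid point (i, j)
# coords[..., 0] varies along the first axis, coords[..., 1] along the second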
Example #7
def set_permutation_symmetry_fc3_deprecated(fc3):
    fc3_sym = np.zeros(fc3.shape, dtype='double')
    for (i, j, k) in list(np.ndindex(fc3.shape[:3])):
        fc3_sym[i, j, k] = set_permutation_symmetry_fc3_elem(fc3, i, j, k)

    for (i, j, k) in list(np.ndindex(fc3.shape[:3])):
        fc3[i, j, k] = fc3_sym[i, j, k]
Example #8
    def _nppma_ndindex(self, tup, N=None, ecut=None):
        """Generates vibrational signatures in NP per mode Aproximation (NPPMA) 
        
        """
        if N is None:
            raise Exception("Number of states to be used must be defined")

        if ecut is not None:
            vec = self.convert_energy_2_internal_u(ecut)

        two = numpy.zeros(len(tup), dtype=numpy.int)
        two[:] = N + 1

        for i in range(len(two)):
            if two[i] > tup[i] + 1:
                two[i] = tup[i] + 1
                
        shp = tuple(two)
        
        if ecut is not None:
            #with energy_units("int"):    
            for sig in numpy.ndindex(shp):
                en = self.convert_energy_2_internal_u(self.vibenergy(vsig=sig))
                if en <= vec:
                    yield sig
                        
        else:
            for sig in numpy.ndindex(shp):
                yield sig        
Example #9
def procedure(ticks):
    n = 500
    b = .000006662 
    D = 1
    alpha = 2

    n_types = ticks**D
    #print 'Number of types: {}'.format(n_types)
    M = np.zeros([ticks**D, ticks**D])
    registry = {}
    
    next_id = 0

    for index in np.ndindex(tuple([ticks] * D)):
        i = index[:D]
        registry[i] = next_id 
        next_id += 1

    for index in np.ndindex(tuple([ticks]* D * 2)):
        i = index[:D]
        j = index[D:]

        if i != j:
            pos_i = [float(_i) / (ticks - 1) for _i in i]
            pos_j = [float(_j) / (ticks - 1) for _j in j]

            M[registry[i], registry[j]] = .5 * n**2 / n_types**2 *\
                b / (b + model.distance(None, pos_i, pos_j)**alpha) 

    eigvals = scipy.linalg.eigvals(M) 
    return max(eigvals)
Example #10
def garnet_gen_s( Ns, Na, Nb, sparsity, neighbor):
    ### generating the Kernel
    kernel = np.zeros((Ns, Ns, Na))  # p(s'|s,a)
    for i, j in np.ndindex((Ns, Na)):
        echantillon = rd.sample(list(set(range(Ns)).intersection(range(i-neighbor,i+neighbor))), Nb)
        cumulative = np.concatenate(([0], np.sort([rd.random() for k in range(Nb - 1)]), [1]), axis=0)
        for k in range(Nb):
            kernel[echantillon[k], i, j] = cumulative[k + 1] - cumulative[k]
    ### generating rewards at random
    reward0 = np.zeros((Ns, Na))
    reward1 = np.zeros((Ns, Na))

    biais0 = np.random.randn(Ns)
    biais1 = np.random.randn(Ns)
    for i, j in np.ndindex((Ns, Na)):
        reward0[i,j] = biais0[i]
        reward1[i,j] = biais1[i]

    masque_reward = np.zeros((Ns, Na))
    N_sparsity = int(Ns * sparsity)
    i = 0
    while i < N_sparsity:
        i_ = rd.randint(0, Ns - 1)
        if masque_reward[i_, 0] == 0:
            masque_reward[i_, :] = 1
            i += 1
    reward0 = reward0 * masque_reward
    reward1 = reward1 * masque_reward
    control = np.random.randint(2, size=Ns)

    return Ns, Na, kernel, reward0, reward1, control
Example #11
File: pos.py Project: proggy/tb
 def __call__(self, distinguish=False):
     """Get all positions as a list. If *distinguish* is *True*, nest the
     position list inside a 1-tuple (as the positions cannot be
     distinguished any further in this case)."""
     # 2012-05-03 - 2012-09-03
     return (list(numpy.ndindex(self.shape)),) if distinguish \
         else list(numpy.ndindex(self.shape))
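A standalone illustration of the pattern used above: np.ndindex(shape) enumerates every index tuple of a grid in row-major order, so wrapping it in list() yields all positions at once.

import numpy as np

positions = list(np.ndindex((2, 3)))
# [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]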
Example #12
    def calc_eigs(self, sort=True, symmetric=False, mandel=False):
        if symmetric:
            eigfun=np.linalg.eigvalsh
        else:
            eigfun=np.linalg.eigvals

        if self.order==2:
            eigs=[]  # collect the eigenvalues of each point's matrix
            for ind in np.ndindex(self.N):
                mat=self.val[:, :][ind]
                eigs.append(eigfun(mat))
        elif self.order==4:
            if mandel:
                matrixfun=lambda x: ElasticTensor.create_mandel(x)
                d=self.shape[2]
                eigdim=d*(d+1)//2
            else:
                matshape=(self.shape[0]*self.shape[1], self.shape[2]*self.shape[3])
                matrixfun=lambda x: np.reshape(x, matshape)
                eigdim=self.shape[2]*self.shape[3]

            eigs=np.zeros(self.N+(eigdim,))
            val=np.copy(self.val)
            for ii in range(self.dim):
                val=np.rollaxis(val, self.val.ndim-self.dim+ii, ii)

            for ind in np.ndindex(*self.N):
                mat=matrixfun(val[ind])
                eigs[ind]=eigfun(mat)

        eigs=np.array(eigs)
        if sort:
            eigs=np.sort(eigs, axis=0)
        return eigs
Example #13
def process(destinations_list):
    # out
    out = [idx for idx in np.ndindex(destinations_list[0].arr.shape)]
    # generate slices list of dictionaries
    if len(destinations_list[-1].arr.shape) > 1:
        # multidimensional scan
        slices = []
        for i, idx in enumerate(np.ndindex(destinations_list[-1].arr.shape[:-1])):
            s = {}
            s['index'] = destinations_list[-1].arr.shape[-1]*i
            s['name'] = destinations_list[-1].hardware.name
            s['units'] = destinations_list[-1].units
            s['points'] = destinations_list[-1].arr[idx]
            if destinations_list[-1].method == 'set_position':
                s['use actual'] = True
            else:
                s['use actual'] = False
            slices.append(s)
    else:
        # 1D scan
        s = {}
        s['index'] = 0
        s['name'] = destinations_list[0].hardware.name
        s['units'] = destinations_list[0].units
        s['points'] = destinations_list[0].arr
        if destinations_list[0].method == 'set_position':
            s['use actual'] = True
        else:
            s['use actual'] = False
        slices = [s]
    return out, slices
Example #14
File: net.py Project: nhaliday/ai
    def backprop(self, targets, mu):
        if len(targets) != self.nout:
            raise ValueError('wrong number of targets')

        # output deltas first
        # partial E wrt v_k = sum w_jk z_j where a_k = sigmoid(v_k)
        # odelta = (self.aout - targets) * dsigmoid(self.aout)
        # hdelta = np.dot(odelta, self.wout)[:-1] * dsigmoid(self.ain[:-1])
        
        # matrix ops not working for some reason :(, I have a bug
        # time to be more straightforward

        odelta = np.zeros(self.nout)
        for k in range(self.nout):
            odelta[k] = (self.aout[k] - targets[k]) * dsigmoid(self.aout[k])

        for k, j in np.ndindex(self.wout.shape):
            if self.nhid:
                self.wout[k, j] -= mu * odelta[k] * self.ahid[j]
            else:
                self.wout[k, j] -= mu * odelta[k] * self.ain[j]


        if self.nhid:
            hdelta = np.zeros(self.nhid)
            for j in range(self.nhid):
                hdelta[j] = dsigmoid(self.ahid[j]) * np.dot(self.wout[:, j], odelta)

            for j, i in np.ndindex(self.win.shape):
                self.win[j, i] -= mu * hdelta[j] * self.ain[i]

        # self.wout -= mu * np.outer(odelta, self.aout)
        # self.win -= mu * np.outer(hdelta, self.ain)

        return 0.5 * np.linalg.norm(targets - self.aout)
Example #15
    def __init__(self):

        n = 3 # colors
        m = len(kernels2)
        (h, w) = kernels2[0].shape
        X = numpy.zeros(( (2**m)*(2**n), w*h*3),dtype='float32')
        idx = 0
        for i in numpy.ndindex(*([2]*m)):
            for j in numpy.ndindex(*([2]*n)):

                example = numpy.zeros((n,h,w), dtype='float32')
                for k in xrange(m):
                    if i[k]:
                        example[0,:,:] += j[0] * kernels2[k]
                        example[1,:,:] += j[1] * kernels2[k]
                        example[2,:,:] += j[2] * kernels2[k]

                X[idx,:] = example.reshape(h * w * n)
                idx += 1

        view_converter = dense_design_matrix.DefaultViewConverter((h,w,1))

        super(SuperImposedShapes,self).__init__(X = X, view_converter = view_converter)

        assert not numpy.any(numpy.isnan(self.X))
Example #16
File: wcsnd.py Project: pdeiml/gammapy
    def _make_default_data(geom, shape_np, dtype):
        # Check whether corners of each image plane are valid
        coords = []
        if not geom.is_regular:
            for idx in np.ndindex(geom.shape):
                pix = (np.array([0.0, float(geom.npix[0][idx] - 1)]),
                       np.array([0.0, float(geom.npix[1][idx] - 1)]))
                pix += tuple([np.array(2 * [t]) for t in idx])
                coords += geom.pix_to_coord(pix)

        else:
            pix = (np.array([0.0, float(geom.npix[0] - 1)]),
                   np.array([0.0, float(geom.npix[1] - 1)]))
            pix += tuple([np.array(2 * [0.0]) for i in range(geom.ndim - 2)])
            coords += geom.pix_to_coord(pix)

        if np.all(np.isfinite(np.vstack(coords))):
            if geom.is_regular:
                data = np.zeros(shape_np, dtype=dtype)
            else:
                data = np.full(shape_np, np.nan, dtype=dtype)
                for idx in np.ndindex(geom.shape):
                    data[idx,
                         slice(geom.npix[0][idx]),
                         slice(geom.npix[1][idx])] = 0.0
        else:
            data = np.full(shape_np, np.nan, dtype=dtype)
            idx = geom.get_idx()
            m = np.all(np.stack([t != -1 for t in idx]), axis=0)
            data[m] = 0.0

        return data
Example #17
def python_local_maxima(data, wsize, mode=wrap):
  result = np.ones(shape=data.shape,dtype='bool')
  for pos in np.ndindex(data.shape):
    myval = data[pos]  
    for offset in np.ndindex(wsize):
      neighbor_idx = tuple(mode(p, o-w//2, w) for (p, o, w) in zip(pos, offset, wsize))
      result[pos] &= (data[neighbor_idx] <= myval)
  return result 
Example #18
 def _geo_ts_to_vec(self, data, pts):
     res = {}
     for name, ts in data.items():
         tpe = self.source_type_map[name] 
         ids = [idx for idx in np.ndindex(pts.shape[:-1])]
         res[name] = tpe.vector_t([tpe(api.GeoPoint(*pts[idx]),
                                   ts[idx]) for idx in np.ndindex(pts.shape[:-1])])
     return res
Example #19
File: spectrum.py Project: molmod/yaff
 def _iter_indexes(self, array):
     if self.select is None:
         for indexes in np.ndindex(array.shape[1:]):
             yield indexes
     else:
         for i0 in self.select:
             for irest in np.ndindex(array.shape[2:]):
                 yield (i0,) + irest
Example #20
    def run(self, spikesorter, 
                        k_max=1,
                        k_inc=1,
                        from_fullband=False,
                        median_thresh=5.,
                        consistent_across_channels = False,
                        consistent_across_segments = True,
                        merge_method = 'fast',
                        sweep_clean_size = 0.8*pq.ms,
                        ):
                        
        sps = spikesorter

        # What is the source signal?
        if (from_fullband or (sps.filtered_sigs is None)):
            MTEO_sigs = np.empty( sps.full_band_sigs.shape, dtype = object)
            sigs=sps.full_band_sigs
        else:
            MTEO_sigs = np.empty( sps.filtered_sigs.shape, dtype = object)
            sigs=sps.filtered_sigs

        # Compute MTEO signals
        for c,s in np.ndindex(sigs.shape) :  
            sig = sigs[c, s]
            kTEO_sig=np.zeros(sig.size)
            #compute all k-TEO, including k_max if possible
            for k in range(1,k_max+1,k_inc): 
                s1 = sig[0:-2*k]
                s2 = sig[k:-k]
                s3 = sig[2*k:]
                # standard kTEO signal
                kTEO_sig[k:-k]=ne.evaluate("s2**2-s1*s3") 
                hamm = hamming(4*(k+1)+1)#smoothing window
                norm=np.sqrt(3*(hamm**2.)+(hamm.sum())**2.)
                hamm = hamm/norm # normalization of the window 
                #proposed by Choi et al. to prevent excess of false detections at small k
                kTEO_sig=np.convolve(kTEO_sig,hamm,'same')
                if k==1:
                    MTEO_sig=kTEO_sig.copy()
                else:
                    #take the max over all kTEO iteratively
                    MTEO_sig=ne.evaluate("where(MTEO_sig<kTEO_sig,kTEO_sig,MTEO_sig)") 
                        
            MTEO_sigs[c,s]=MTEO_sig

        # Threshold estimation
        thresholds = np.zeros(MTEO_sigs.shape, dtype = float)
        for c, s in np.ndindex(MTEO_sigs.shape):
            sig = MTEO_sigs[c, s]
            thresholds[c, s] = abs(median_thresh) * np.median(abs(sig)) / .6745
        
        # Detect
        sweep_size = int((sps.sig_sampling_rate*sweep_clean_size).simplified)
        sps.spike_index_array = threshold_detection_multi_channel_multi_segment(
                                MTEO_sigs, thresholds, '+', 
                                consistent_across_channels,consistent_across_segments,
                                sweep_size, merge_method = merge_method,)
Example #21
def local_maxima(data, mode=wrap):
  wsize = data.shape
  result = np.ones(data.shape, bool)
  for pos in np.ndindex(data.shape):
    myval = data[pos]
    for offset in np.ndindex(wsize):
      neighbor_idx = tuple(mode(p, o-w//2, w) for (p, o, w) in zip(pos, offset, wsize))
      result[pos] &= (data[neighbor_idx] <= myval)
  return result
Example #22
def read_css():

    """I may never have written a more painful function in my life.

    If you want data, and are thinking of using numpy or pandas --
    read it in by hand.
    """
    # Can't delete array elements.
    #ix = 1 #so as to skip the header row.
    n = 21
    chunk = 2*n + 3 # Both matrices, and 3 extra rows
    advice = np.zeros((n, n, n))
    friendship = np.zeros((n, n, n))

    pdf = pd.read_csv("/Users/alexloewi/Documents/Data/cognitive social structures/rearranged_cogsocstr.txt", sep="\t")

    matrix_columns = pdf[pdf.columns[0:21]]
    #print 'matrix columns!!!!!!,', matrix_columns

    for perceiver in range(n):
        # This gets all the data for one person
        x = (chunk*perceiver)
        y = (chunk*(perceiver+1))-1

        a = np.array(matrix_columns.ix[x:x+20])
        np.fill_diagonal(a, 0)
        f = np.array(matrix_columns.ix[x+21:x+41])
        np.fill_diagonal(f, 0)
        advice[perceiver,:,:]     = a #np.array(matrix_columns.ix[0:20])
        friendship[perceiver,:,:] = f #np.array(matrix_columns.ix[21:41])

    # Consensus matrices (AND rule)
    ca = np.zeros((n,n))
    cf = np.zeros((n,n))

    for i,j in np.ndindex(ca.shape):
        if advice[i,i,j] + advice[j,i,j] == 2:
            ca[i,j] = 1

    for i,j in np.ndindex(cf.shape):
        if friendship[i,i,j] + friendship[j,i,j] == 2:
            cf[i,j] = 1

    # Self-proclaimed relationships (OR rule)
    sa = np.zeros((n,n))
    sf = np.zeros((n,n))

    for i,j in np.ndindex(sa.shape):
        if advice[i,i,j] + advice[j,i,j] >= 1:
            sa[i,j] = 1

    for i,j in np.ndindex(sf.shape):
        if friendship[i,i,j] + friendship[j,i,j] >= 1:
            sf[i,j] = 1


    return advice, friendship, ca, cf, sa, sf
Example #23
 def __init__(self, context, images):
     self.context = context
     self.tiles = np.empty(images.shape, object)
     for c in np.ndindex(self.tiles.shape):
         images[c].data = context.pre(images[c].data)
         from lib.canvas import Image
         Image(None, context.features(images[c].data)).show()
         self.tiles[c] = TileStitcher.Tile(images[c])
     for c in np.ndindex(self.tiles.shape):
         self.tiles[c].meet_neighbours(c, self.tiles)
Example #24
    def make_aperture(self):
        circle = np.array([(self.x-x)**2/self.r**2 + (self.y-y)**2/self.r**2 \
                           for x,y in np.ndindex(self.data.shape)])

        mask = np.array([True if circle[x] <= 1 else False \
                         for x in np.ndindex(circle.shape)])

        mask = mask.reshape(self.data.shape)

        return mask.astype('float')
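The same circular mask can be built without the per-pixel np.ndindex loops by broadcasting index grids; a vectorized sketch under the same index convention (x runs over the first axis, y over the second; the standalone names are illustrative):

import numpy as np

def circular_aperture(shape, cx, cy, r):
    ii, jj = np.ogrid[:shape[0], :shape[1]]          # row and column index grids
    circle = (cx - ii) ** 2 / r ** 2 + (cy - jj) ** 2 / r ** 2
    return (circle <= 1).astype(float)               # 1.0 inside the aperture, 0.0 outside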
Example #25
def get_power_index(axes,dgrid,zerocentered,irred=False,fourier=True):

    """
        Returns the index of the Fourier grid points in a numpy
        array, ordered following the zerocentered flag.

        Parameters
        ----------
        axes : ndarray
            An array with the length of each axis.

        dgrid : ndarray
            An array with the pixel length of each axis.

        zerocentered : bool
            Whether the output array should be zerocentered, i.e. starting with
            negative Fourier modes going over the zero mode to positive modes,
            or not zerocentered, where zero, positive and negative modes are
            simply ordered consecutively.

        irred : bool : *optional*
            If True, the function returns an array of all k-vector lengths and
            their degeneracy factors. If False, just the power index array is
            returned.

        fourier : bool : *optional*
            Whether the output should be in Fourier space or not
            (default=True).

        Returns
        -------
            index or {klength, rho} : scalar or list
                Returns either an array of all k-vector lengths and
                their degeneracy factors or just the power index array
                depending on the flag irred.

    """

    ## kdict, klength
    if(np.any(zerocentered==False)):
        kdict = np.fft.fftshift(nkdict_fast(axes,dgrid,fourier),axes=shiftaxes(zerocentered,st_to_zero_mode=True))
    else:
        kdict = nkdict_fast(axes,dgrid,fourier)
    klength = nklength(kdict)
    ## output
    if(irred):
        rho = np.zeros(klength.shape,dtype=np.int)
        for ii in np.ndindex(kdict.shape):
            rho[np.searchsorted(klength,kdict[ii])] += 1
        return klength,rho
    else:
        ind = np.empty(axes,dtype=np.int)
        for ii in np.ndindex(kdict.shape):
            ind[ii] = np.searchsorted(klength,kdict[ii])
        return ind
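The degeneracy loop above (counting how many grid cells share each k-vector length) can also be expressed with np.unique; a sketch with a stand-in array in place of nkdict_fast:

import numpy as np

kdict = np.random.rand(8, 8)                          # stand-in for nkdict_fast(axes, dgrid, fourier)
klength, rho = np.unique(kdict, return_counts=True)   # unique lengths and their degeneracies
ind = np.searchsorted(klength, kdict)                 # power index array, as in the else-branch above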
Example #26
def test_optimal_angle_filter():
	image = pylab.imread(sys.argv[1])
	image = np.mean(image, axis=2)
	image = image[::-1]
	pylab.gray()

	f = AngleFilter((7, 7))

	components = f.get_component_values(image, mode='same')

	angles = np.radians(range(0, 180, 1))
	def best_angle_value(c):
		values = f.basis.response_at_angle(c, angles)
		return angles[np.argmax(values)], np.max(values)

	for y, x in np.ndindex(components.shape[:2]):
		if y%4 != 0: continue
		if x%4 != 0: continue
		maxval = f.basis.max_response_value(components[y,x])
		if maxval < 2: continue
		maxang_an = f.basis.max_response_angle(components[y,x])
		maxang, maxval = best_angle_value(components[y,x])
		pylab.scatter(x, y)
		dy = -5.0
		dx, dy = np.array((np.cos(maxang), np.sin(maxang)))*10
		#dx = np.tan(maxang_an)*dy
		#d = d/np.linalg.norm(d)*3
		pylab.arrow(x, y, dx, dy, color='blue')

		#d = np.array((-np.sin(maxang_an), -np.cos(maxang_an)))
		#d = d/np.linalg.norm(d)*3
		#pylab.arrow(x, y, d[0], d[1], color='green')

	
		#pylab.plot(x, y, '.')

	pylab.imshow(image, origin="lower")
	#pylab.xlim(0, components.shape[1])
	#pylab.ylim(components.shape[0], 0)
	pylab.show()
	return
	
	#pylab.subplot(1,2,1)
	#pylab.imshow(image)
	#pylab.subplot(1,2,2)
	filtered = np.zeros(image.shape)
	for y, x in np.ndindex(components.shape[:2]):
		maxval = f.basis.max_response_value(components[y,x])
		minval = f.basis.min_response_value(components[y,x])
		#print maxval, minval
		filtered[y,x] = maxval
		#filtered[y, x] = best_angle_value(components[y,x])
	#pylab.hist(filtered.flatten())
	pylab.imshow(filtered > 3)
	pylab.show()
Example #27
    def fit(self, X, y):
        a = np.zeros((24, 7))
        hours = np.copy(X[:, 1])
        weekdays = np.copy(X[:, 2])
        hours = 23 * normalize(hours)
        weekdays = 6 * normalize(weekdays)

        if self.strategy == 'mean':
            counts = a.copy()
            for i, row in enumerate(zip(hours, weekdays)):
                hour = int(row[0])
                day = int(row[1])
                counts[hour, day] += 1
                a[hour, day] += y[i]

            counts[counts == 0] = 1
            self._model = a / counts

        elif self.strategy in ('median', 'kernel'):

            # this is a 3d array 
            groups = [[[] for i in range(7)] for j in range(24)]

            for i, row in enumerate(zip(hours, weekdays)):
                hour = int(row[0])
                day = int(row[1])
                groups[hour][day].append(y[i])

            if self.strategy == 'median':
                for i, j in np.ndindex((24, 7)):
                    if groups[i][j]:
                        a[i,j] = np.median(groups[i][j])
                    else:
                        a[i,j] = np.nan
            elif self.strategy == 'kernel':
                # kernel method computes a kernel density for each of the
                # bins and determines the most probable value ('mode' of sorts)
                grid = np.linspace(np.min(y), np.max(y), 1000)[:, np.newaxis]
                for i, j in np.ndindex((24, 7)):
                    if groups[i][j]:
                        npgroups = np.array(groups[i][j])[np.newaxis]
                        kernel = KernelDensity(kernel='gaussian', \
                                                bandwidth=0.2).fit(npgroups.T)
                        density = kernel.score_samples(grid)
                        dmax = np.max(density)
                        imax = np.where(density==dmax)
                        a[i,j] = grid[imax, 0]
                    else:
                        a[i,j] = np.nan

            self._model = a

        # smooth the model here if there are nans
        return self
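The 'kernel' strategy above picks the grid value where the estimated density peaks. A standalone sketch of that idea for a single (hour, day) bin, using scikit-learn's KernelDensity with illustrative data:

import numpy as np
from sklearn.neighbors import KernelDensity

values = np.random.randn(200, 1)                          # samples falling in one bin
grid = np.linspace(values.min(), values.max(), 1000)[:, np.newaxis]
kde = KernelDensity(kernel='gaussian', bandwidth=0.2).fit(values)
density = kde.score_samples(grid)                         # log-density on the grid
mode_estimate = grid[np.argmax(density), 0]               # the 'mode of sorts'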
Example #28
    def add_mean(self, mean):
        assert(self.shape==mean.shape)

        if self.Fourier:
            ind=self.mean_index()
            for di in np.ndindex(*self.shape):
                self.val[di+ind]=mean[di]*self.fft_coef
        else:
            for di in np.ndindex(*self.shape):
                self.val[di]+=mean[di]
        return self
Example #29
def test_cdot_grad(comm):
    pm = ParticleMesh(BoxSize=8.0, Nmesh=[4, 4], comm=comm, dtype='f8')

    comp1 = pm.generate_whitenoise(1234, type='complex', mean=1)
    comp2 = pm.generate_whitenoise(1235, type='complex', mean=1)

    def objective(comp1, comp2):
        return comp1.cdot(comp2).real

    y = objective(comp1, comp2)

    grad_comp2 = comp1.cdot_vjp(1.0)
    grad_comp1 = comp2.cdot_vjp(1.0)

    grad_comp1.decompress_vjp(grad_comp1)
    grad_comp2.decompress_vjp(grad_comp2)

    print(grad_comp1)
    print("comp1")
    ng = []
    ag = []
    ind = []
    dx = 1e-7
    for ind1 in numpy.ndindex(*(list(comp1.cshape) + [2])):
        dx1, c1 = perturb(comp1, ind1, dx)
        ng1 = (objective(c1, comp2) - objective(comp1, comp2)) / dx
        ag1 = grad_comp1.cgetitem(ind1) * dx1 / dx
        if abs(ag1 - ng1) > 1e-5 * max((abs(ag1), abs(ng1))):
            print (ind1, 'a', ag1, 'n', ng1)
        comm.barrier()
        ng.append(ng1)
        ag.append(ag1)
        ind.append(ind1)

    assert_allclose(ng, ag, rtol=1e-5)

    print("comp2")

    ng = []
    ag = []
    ind = []
    dx = 1e-7
    for ind1 in numpy.ndindex(*(list(comp1.cshape) + [2])):
        dx1, c2 = perturb(comp2, ind1, dx)
        ng1 = (objective(comp1, c2) - objective(comp1, comp2)) / dx
        ag1 = grad_comp2.cgetitem(ind1) * dx1 / dx
        if abs(ag1 - ng1) > 1e-5 * max((abs(ag1), abs(ng1))):
            print (ind1, 'a', ag1, 'n', ng1)
        comm.barrier()
        ng.append(ng1)
        ag.append(ag1)
        ind.append(ind1)

    assert_allclose(ng, ag, rtol=1e-5)
Example #30
 def update_grid(self, input_vec):
     ind_min, dist_min = (0, 0), float('inf')
     for ind in np.ndindex(self.grid_shape):
         dist = self.dist_func(self.grid[ind], input_vec)
         if dist < dist_min:
             ind_min, dist_min = ind, dist
     for ind in np.ndindex(self.grid_shape):
         neuron = self.grid[ind]
         dist = np.subtract(input_vec, neuron)
         delta = self.neighbor_weight(ind_min, ind) * dist
         self.grid[ind] += delta
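The best-matching-unit search above can be vectorized when the distance is plain Euclidean; a sketch assuming a grid of shape grid_shape + (n_features,) (standalone names, not the class attributes):

import numpy as np

def find_bmu(grid, input_vec):
    dists = np.linalg.norm(grid - input_vec, axis=-1)      # distance at every grid position
    return np.unravel_index(np.argmin(dists), dists.shape)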
Example #31
    def cf_label_data(self, cf_data_var):
        """
        Return the associated CF-netCDF label variable strings.

        Args:

        * cf_data_var (:class:`iris.fileformats.cf.CFDataVariable`):
            The CF-netCDF data variable which the CF-netCDF label variable describes.

        Returns:
            String labels.

        """

        if not isinstance(cf_data_var, CFDataVariable):
            raise TypeError(
                'cf_data_var argument should be of type CFDataVariable. Got %r.'
                % type(cf_data_var))

        # Determine the name of the label string (or length) dimension by
        # finding the dimension name that doesn't exist within the data dimensions.
        str_dim_name = list(set(self.dimensions) - set(cf_data_var.dimensions))

        if len(str_dim_name) != 1:
            raise ValueError(
                'Invalid string dimensions for CF-netCDF label variable %r' %
                self.cf_name)

        str_dim_name = str_dim_name[0]
        label_data = self[:]

        if isinstance(label_data, ma.MaskedArray):
            label_data = label_data.filled()

        # Determine whether we have a string-valued scalar label
        # i.e. a character variable that only has one dimension (the length of the string).
        if self.ndim == 1:
            data = np.array([''.join(label_data).strip()])
        else:
            # Determine the index of the string dimension.
            str_dim = self.dimensions.index(str_dim_name)

            # Calculate new label data shape (without string dimension) and create payload array.
            new_shape = tuple(dim_len for i, dim_len in enumerate(self.shape)
                              if i != str_dim)
            string_basetype = '|S%d' if six.PY2 else '|U%d'
            string_dtype = string_basetype % self.shape[str_dim]
            data = np.empty(new_shape, dtype=string_dtype)

            for index in np.ndindex(new_shape):
                # Create the slice for the label data.
                if str_dim == 0:
                    label_index = (slice(None, None), ) + index
                else:
                    label_index = index + (slice(None, None), )

                label_string = b''.join(label_data[label_index]).strip()
                if six.PY3:
                    label_string = label_string.decode('utf8')
                data[index] = label_string

        return data
Example #32
    def init(self):

        image = Image.open(self.fname, 'r')
        print(self.fname)

        image = image.convert('RGB')
        image = image.convert('RGBA')
        #image.putalpha(50)

        self.size = image.size
        col = [self.red, self.green, self.blue]

        for x, y in np.ndindex(self.size[0], self.size[1]):
            r, g, b, a = image.getpixel((x, y))
            #print x,y,r,g,b,a

            if self.red:
                ra = 1
            else:
                ra = 0

            if self.green:
                ga = 1
            else:
                ga = 0

            if self.blue:
                ba = 1
            else:
                ba = 0

            image.putpixel((x, y), (r*ra, g*ga, b*ba, a))
            #image.putpixel((x,y),(r,g,b,a))

        image = image.tostring("raw", image.mode, 0, -1)

        self.texture_index = gl.glGenTextures(1)
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.texture_index)
        gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
        gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_REPEAT)
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_REPEAT)
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)

        for o in self.orbits:
            self.init_texture(image)
Example #33
File: 11.py Project: stgn/aoc18
def part_one(size_bests):
    x, y = size_bests[0][1:3]
    return f'{x},{y}'


def part_two(size_bests):
    x, y, size = max(size_bests)[1:]
    return f'{x},{y},{size}'


if __name__ == '__main__':
    serial = int(sys.argv[1])

    grid = np.empty(GRID_SIZE, np.int32)

    for y, x in np.ndindex(GRID_SIZE):
        rack_id = x + 11
        power = ((y + 1) * rack_id + serial) * rack_id
        power = (power // 100) % 10 - 5
        grid[y, x] = power

    size_bests = []

    for size in range(3, 301):
        windows = sliding_sum(grid, size)
        power, x, y = max(windows)
        if power < 0:
            break
        size_bests.append((power, x + 1, y + 1, size))

    print(part_one(size_bests))
Example #34
    def __call__(
        self,
        x: np.ndarray,
        y: Optional[np.ndarray] = None
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """
        Apply JPEG compression to sample `x`.

        For input images or videos with 3 color channels the compression is applied in mode `RGB`
        (3x8-bit pixels, true color); for any other number of channels the compression is applied to each channel in
        mode `L` (8-bit pixels, black and white).

        :param x: Sample to compress with shape of `NCHW`, `NHWC`, `NCFHW` or `NFHWC`. `x` values are expected to be in
                  the data range [0, 1] or [0, 255].
        :param y: Labels of the sample `x`. This function does not affect them in any way.
        :return: compressed sample.
        """
        x_ndim = x.ndim
        if x_ndim not in [4, 5]:
            raise ValueError(
                "Unrecognized input dimension. JPEG compression can only be applied to image and video data."
            )

        if x.min() < 0.0:
            raise ValueError(
                "Negative values in input `x` detected. The JPEG compression defence requires unnormalized input."
            )

        # Swap channel index
        if self.channels_first and x_ndim == 4:
            # image shape NCHW to NHWC
            x = np.transpose(x, (0, 2, 3, 1))
        elif self.channels_first and x_ndim == 5:
            # video shape NCFHW to NFHWC
            x = np.transpose(x, (0, 2, 3, 4, 1))

        # insert temporal dimension to image data
        if x_ndim == 4:
            x = np.expand_dims(x, axis=1)

        # Convert into uint8
        if self.clip_values[1] == 1.0:
            x = x * 255
        x = x.astype("uint8")

        # Compress one image at a time
        x_jpeg = x.copy()
        for idx in tqdm(np.ndindex(x.shape[:2]),
                        desc="JPEG compression",
                        disable=not self.verbose):
            if x.shape[-1] == 3:
                x_jpeg[idx] = self._compress(x[idx], mode="RGB")
            else:
                for i_channel in range(x.shape[-1]):
                    x_channel = x[idx[0], idx[1], ..., i_channel]
                    x_channel = self._compress(x_channel, mode="L")
                    x_jpeg[idx[0], idx[1], :, :, i_channel] = x_channel

        # Convert to ART dtype
        if self.clip_values[1] == 1.0:
            x_jpeg = x_jpeg / 255.0
        x_jpeg = x_jpeg.astype(ART_NUMPY_DTYPE)

        # remove temporal dimension for image data
        if x_ndim == 4:
            x_jpeg = np.squeeze(x_jpeg, axis=1)

        # Swap channel index
        if self.channels_first and x_jpeg.ndim == 4:
            # image shape NHWC to NCHW
            x_jpeg = np.transpose(x_jpeg, (0, 3, 1, 2))
        elif self.channels_first and x_ndim == 5:
            # video shape NFHWC to NCFHW
            x_jpeg = np.transpose(x_jpeg, (0, 4, 1, 2, 3))
        return x_jpeg, y
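A minimal illustration of the compression loop's iteration above: np.ndindex over the first two axes visits every (sample, frame) pair of the temporarily five-dimensional batch (toy shapes only):

import numpy as np

x = np.zeros((2, 3, 8, 8, 1), dtype="uint8")     # N=2 samples, F=3 frames, HWC frames
for idx in np.ndindex(x.shape[:2]):
    frame = x[idx]                               # one HWC frame; idx runs (0,0), (0,1), ..., (1,2)
    assert frame.shape == (8, 8, 1)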
Example #35
def affine_transform(image,
                     matrix,
                     offset=0.0,
                     output_shape=None,
                     order=1,
                     output_chunks=None,
                     **kwargs):
    """Apply an affine transform using Dask. For every
    output chunk, only the slice containing the relevant part
    of the image is processed. Chunkwise processing is performed
    either using `ndimage.affine_transform` or
    `cupyx.scipy.ndimage.affine_transform`, depending on the input type.

    Notes
    -----
        Differences to `ndimage.affine_transform`:
        - currently, prefiltering is not supported
          (affecting the output in case of interpolation `order > 1`)
        - default order is 1
        - modes 'reflect', 'mirror' and 'wrap' are not supported

        Arguments equal to `ndimage.affine_transform`,
        except for `output_chunks`.

    Parameters
    ----------
    image : array_like (Numpy Array, Cupy Array, Dask Array...)
        The image array.
    matrix : array (ndim,), (ndim, ndim), (ndim, ndim+1) or (ndim+1, ndim+1)
        Transformation matrix.
    offset : float or sequence, optional
        The offset into the array where the transform is applied. If a float,
        `offset` is the same for each axis. If a sequence, `offset` should
        contain one value for each axis.
    output_shape : tuple of ints, optional
        The shape of the array to be returned.
    order : int, optional
        The order of the spline interpolation. Note that for order>1
        scipy's affine_transform applies prefiltering, which is not
        yet supported and skipped in this implementation.
    output_chunks : tuple of ints, optional
        The shape of the chunks of the output Dask Array.

    Returns
    -------
    affine_transform : Dask Array
        A dask array representing the transformed output

    """

    if not type(image) == da.core.Array:
        image = da.from_array(image)

    if output_shape is None:
        output_shape = image.shape

    if output_chunks is None:
        output_chunks = image.shape

    # Perform test run to ensure parameter validity.
    ndimage_affine_transform(np.zeros([0] * image.ndim), matrix, offset)

    # Make sure parameters contained in matrix and offset
    # are not overlapping, i.e. that the offset is valid as
    # it needs to be modified for each chunk.
    # Further parameter checks are performed directly by
    # `ndimage.affine_transform`.

    matrix = np.asarray(matrix)
    offset = np.asarray(offset).squeeze()

    # these lines were copied and adapted from `ndimage.affine_transform`
    if (matrix.ndim == 2 and matrix.shape[1] == image.ndim + 1
            and (matrix.shape[0] in [image.ndim, image.ndim + 1])):

        # assume input is homogeneous coordinate transformation matrix
        offset = matrix[:image.ndim, image.ndim]
        matrix = matrix[:image.ndim, :image.ndim]

    # process kwargs
    # prefilter is not yet supported
    if 'prefilter' in kwargs:
        if kwargs['prefilter'] and order > 1:
            warnings.warn(
                'Currently, `dask_image.ndinterp.affine_transform` '
                'doesn\'t support `prefilter=True`. Proceeding with'
                ' `prefilter=False`, which if order > 1 can lead '
                'to the output containing more blur than with '
                'prefiltering.', UserWarning)
        del kwargs['prefilter']

    if 'mode' in kwargs:
        if kwargs['mode'] in ['wrap', 'reflect', 'mirror']:
            raise (NotImplementedError("Mode %s is not currently supported." %
                                       kwargs['mode']))

    n = image.ndim
    image_shape = image.shape

    # calculate output array properties
    normalized_chunks = da.core.normalize_chunks(output_chunks,
                                                 tuple(output_shape))
    block_indices = product(*(range(len(bds)) for bds in normalized_chunks))
    block_offsets = [np.cumsum((0, ) + bds[:-1]) for bds in normalized_chunks]

    # use dispatching mechanism to determine backend
    affine_transform_method = dispatch_affine_transform(image)
    asarray_method = dispatch_asarray(image)

    # construct dask graph for output array
    # using unique and deterministic identifier
    output_name = 'affine_transform-' + tokenize(
        image, matrix, offset, output_shape, output_chunks, kwargs)
    output_layer = {}
    rel_images = []
    for ib, block_ind in enumerate(block_indices):

        out_chunk_shape = [
            normalized_chunks[dim][block_ind[dim]] for dim in range(n)
        ]
        out_chunk_offset = [
            block_offsets[dim][block_ind[dim]] for dim in range(n)
        ]

        out_chunk_edges = np.array([i for i in np.ndindex(tuple([2] * n))])\
            * np.array(out_chunk_shape) + np.array(out_chunk_offset)

        # map output chunk edges onto input image coordinates
        # to define the input region relevant for the current chunk
        if matrix.ndim == 1 and len(matrix) == image.ndim:
            rel_image_edges = matrix * out_chunk_edges + offset
        else:
            rel_image_edges = np.dot(matrix, out_chunk_edges.T).T + offset

        rel_image_i = np.min(rel_image_edges, 0)
        rel_image_f = np.max(rel_image_edges, 0)

        # Calculate edge coordinates required for the footprint of the
        # spline kernel according to
        # https://github.com/scipy/scipy/blob/9c0d08d7d11fc33311a96d2ac3ad73c8f6e3df00/scipy/ndimage/src/ni_interpolation.c#L412-L419 # noqa: E501
        # Also see this discussion:
        # https://github.com/dask/dask-image/issues/24#issuecomment-706165593 # noqa: E501
        for dim in range(n):

            if order % 2 == 0:
                rel_image_i[dim] += 0.5
                rel_image_f[dim] += 0.5

            rel_image_i[dim] = np.floor(rel_image_i[dim]) - order // 2
            rel_image_f[dim] = np.floor(rel_image_f[dim]) - order // 2 + order

            if order == 0:  # required for consistency with scipy.ndimage
                rel_image_i[dim] -= 1

        # clip image coordinates to image extent
        for dim, s in zip(range(n), image_shape):
            rel_image_i[dim] = np.clip(rel_image_i[dim], 0, s - 1)
            rel_image_f[dim] = np.clip(rel_image_f[dim], 0, s - 1)

        rel_image_slice = tuple([
            slice(int(rel_image_i[dim]),
                  int(rel_image_f[dim]) + 2) for dim in range(n)
        ])

        rel_image = image[rel_image_slice]
        """Block comment for future developers explaining how `offset` is
        transformed into `offset_prime` for each output chunk.
        Modify offset to point into cropped image.
        y = Mx + o
        Coordinate substitution:
        y' = y - y0(min_coord_px)
        x' = x - x0(chunk_offset)
        Then:
        y' = Mx' + o + Mx0 - y0
        M' = M
        o' = o + Mx0 - y0
        """

        offset_prime = offset + np.dot(matrix, out_chunk_offset) - rel_image_i

        output_layer[(output_name, ) + block_ind] = (
            affine_transform_method,
            (da.core.concatenate3, rel_image.__dask_keys__()),
            asarray_method(matrix),
            offset_prime,
            tuple(out_chunk_shape),  # output_shape
            None,  # out
            order,
            'constant' if 'mode' not in kwargs else kwargs['mode'],
            0. if 'cval' not in kwargs else kwargs['cval'],
            False  # prefilter
        )

        rel_images.append(rel_image)

    graph = HighLevelGraph.from_collections(output_name,
                                            output_layer,
                                            dependencies=[image] + rel_images)

    meta = dispatch_asarray(image)([0]).astype(image.dtype)

    transformed = da.Array(
        graph,
        output_name,
        shape=tuple(output_shape),
        # chunks=output_chunks,
        chunks=normalized_chunks,
        meta=meta)

    return transformed
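A usage sketch for the function defined above (the image size, chunking, and rotation angle are arbitrary illustrative choices):

import numpy as np
import dask.array as da

image = da.random.random((512, 512), chunks=(128, 128))
theta = np.deg2rad(30.0)
matrix = np.array([[np.cos(theta), -np.sin(theta)],
                   [np.sin(theta),  np.cos(theta)]])
transformed = affine_transform(image, matrix, offset=(0.0, 0.0),
                               output_chunks=(128, 128))
result = transformed.compute()                   # evaluates the chunkwise graph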
Example #36
    # calibrate a kernel
    kernel_size = (5, 5)

    # undersample by a factor of 2 in both kx and ky
    kspace[::2, 1::2, :] = 0
    kspace[1::2, ::2, :] = 0

    # reconstruct:
    simple_pruno(kspace,
                 calib,
                 kernel_size,
                 coil_axis=-1,
                 sens=mps,
                 ph=ph,
                 kspace_ref=kspace_ref)
    assert False

    # Take a look
    res = np.abs(
        np.sqrt(N**2) * np.fft.fftshift(
            np.fft.ifft2(np.fft.ifftshift(res, axes=ax), axes=ax), axes=ax))
    res0 = np.zeros((2 * N, 2 * N))
    kk = 0
    for idx in np.ndindex((2, 2)):
        ii, jj = idx[:]
        res0[ii * N:(ii + 1) * N, jj * N:(jj + 1) * N] = res[..., kk]
        kk += 1
    plt.imshow(res0, cmap='gray')
    plt.show()
Example #37
# Lower left of grid
start = (h - 1, 0)

# End location:
# Top right of grid
dx = w - 1
dy = 0

# Blank grid
a = np.zeros((w, h))

# Distance grid
dist = np.zeros(a.shape, dtype=np.int8)

# Calculate distance for all cells
for y, x in np.ndindex(a.shape):
    dist[y][x] = abs((dx - x) + (dy - y))

# "Terrain" is a random value between 1-16.
# Add to the distance grid to calculate
# The cost of moving to a cell
cost = np.random.randint(1, 16, (w, h)) + dist

print("COST GRID (Value + Distance)")
print(cost)
print()
print("(Y, X), HEURISTIC, DISTANCE")

# Find the path
path = astar(start, (dy, dx), cost, dist)
print()
Example #38
def symarray(prefix, shape, **kwargs):  # pragma: no cover
    r"""Create a numpy ndarray of symbols (as an object array).

    The created symbols are named ``prefix_i1_i2_``...  You should thus provide a
    non-empty prefix if you want your symbols to be unique for different output
    arrays, as SymPy symbols with identical names are the same object.

    Parameters
    ----------

    prefix : string
      A prefix prepended to the name of every symbol.

    shape : int or tuple
      Shape of the created array.  If an int, the array is one-dimensional; for
      more than one dimension the shape must be a tuple.

    \*\*kwargs : dict
      keyword arguments passed on to Symbol

    Examples
    ========
    These doctests require numpy.

    >>> from sympy import symarray
    >>> symarray('', 3)
    [_0 _1 _2]

    If you want multiple symarrays to contain distinct symbols, you *must*
    provide unique prefixes:

    >>> a = symarray('', 3)
    >>> b = symarray('', 3)
    >>> a[0] == b[0]
    True
    >>> a = symarray('a', 3)
    >>> b = symarray('b', 3)
    >>> a[0] == b[0]
    False

    Creating symarrays with a prefix:

    >>> symarray('a', 3)
    [a_0 a_1 a_2]

    For more than one dimension, the shape must be given as a tuple:

    >>> symarray('a', (2, 3))
    [[a_0_0 a_0_1 a_0_2]
     [a_1_0 a_1_1 a_1_2]]
    >>> symarray('a', (2, 3, 2))
    [[[a_0_0_0 a_0_0_1]
      [a_0_1_0 a_0_1_1]
      [a_0_2_0 a_0_2_1]]
    <BLANKLINE>
     [[a_1_0_0 a_1_0_1]
      [a_1_1_0 a_1_1_1]
      [a_1_2_0 a_1_2_1]]]

    For setting assumptions of the underlying Symbols:

    >>> [s.is_real for s in symarray('a', 2, real=True)]
    [True, True]
    """
    from numpy import empty, ndindex
    arr = empty(shape, dtype=object)
    for index in ndindex(shape):
        arr[index] = Symbol('%s_%s' % (prefix, '_'.join(map(str, index))),
                            **kwargs)
    return arr
Example #39
    def _calc(self, signal):
        """
        Perform the ALS. Called from self.calculate (defined in 
        AbstractBaseline parent class)
        
        Parameter
        ---------
        signal : ndarray (>= 1D)
            Input signal
            
        Returns
        -------
        baseline : ndarray
            Baseline of input signal
        """
        sig_shape = signal.shape  # Shape of input signal
        #        sig_ndim = signal.ndim  # N Signal dimensions
        sig_size = signal.shape[-1]  # Length of spectral axis

        # N signals to detrend
        sig_n_to_detrend = int(signal.size / signal.shape[-1])

        baseline_output = _np.zeros(sig_shape)

        # Cute linalg trick to create 2nd-order derivative transform matrix
        difference_matrix = _np.diff(_np.eye(sig_size), n=self.order, axis=0)

        # Convert into sparse matrix
        difference_matrix = _cvxopt.sparse(_cvxopt.matrix(difference_matrix))

        for ct, coords in enumerate(_np.ndindex(signal.shape[0:-1])):
            signal_current = signal[coords]

            penalty_vector = _np.ones([sig_size])
            baseline_current = _np.zeros([sig_size])
            baseline_last = _np.zeros([sig_size])

            # Iterative asymmetric least squares smoothing
            for ct_iter in range(self.max_iter):
                penalty_matrix = _cvxopt.spdiag(list(penalty_vector))

                minimazation_matrix = (
                    penalty_matrix +
                    _cvxopt.mul(self.smoothness_param, difference_matrix.T) *
                    difference_matrix)

                x = _cvxopt.matrix(penalty_vector[:] * signal_current)

                try:
                    # Cholesky factorization A = LL'
                    # Solve A * baseline_current = w_sp * Signal
                    _cholmod.linsolve(minimazation_matrix, x, uplo='U')

                except:
                    print('Failure in Cholesky factorization')
                    break
                else:
                    if ct_iter > 0:
                        baseline_last = baseline_current

                    baseline_current = _np.array(x).squeeze()

                    if ct_iter > 0:  # Difference check b/w iterations
                        differ = _np.abs(
                            _np.sum(baseline_current - baseline_last, axis=0))

                        if differ < self.min_diff:
                            break

                    # Apply asymmetric penalization
                    penalty_vector = _np.squeeze(
                        self.asym_param *
                        (signal_current >= baseline_current) +
                        (1 - self.asym_param) *
                        (signal_current < baseline_current))
                    if self.fix_end_points:
                        penalty_vector[0] = 1
                        penalty_vector[-1] = 1

                    if self.fix_rng is not None:
                        penalty_vector[self.fix_rng] = 1

            baseline_output[coords] = baseline_current

            if self.verbose:
                print('Number of iterations to converge: {}'.format(ct_iter))
                print('Finished detrending spectra {}/{}'.format(
                    ct + 1, sig_n_to_detrend))

        return baseline_output
Example #40
def check_grad(func,
               inputs=None,
               eps=1e-6,
               atol=1e-5,
               rtol=1e-3,
               scale=None,
               mean=0):
    """Perform numerical gradient checking given a relay function.

    Compare analytical gradients to numerical gradients derived from two-sided approximation. Note
    that this test may fail if your function input types are not of high enough precision.

    Parameters
    ----------
    func : tvm.relay.Function
        The relay function to test.

    inputs: List[np.array]
        Optional user-provided input parameters to use. If not given, will generate random normal
        inputs scaled to be close to the chosen epsilon value to avoid numerical precision loss.

    eps: float
        The epsilon value to use for computing numerical gradient approximation.

    atol: float
        The absolute tolerance on difference between numerical and analytical gradients. Note that
        this needs to be scaled appropriately relative to the chosen eps and inputs.

    rtol: float
        The relative tolerance on difference between numerical and analytical gradients. Note that
        this needs to be scaled appropriately relative to the chosen eps.

    scale: float
        The standard deviation of the inputs.

    mean: float
        The mean of the inputs.
    """

    fwd_func = run_infer_type(func)
    bwd_func = run_infer_type(gradient(fwd_func))

    if scale is None:
        scale = 10 * eps

    if inputs is None:
        params = fwd_func.params
        # Generate random inputs on the same scale as epsilon to avoid numerical precision loss.
        inputs = [
            _np_randn_from_type(x.checked_type, scale=scale, mean=mean)
            for x in params
        ]

    for target, ctx in ctx_list():
        intrp = relay.create_executor(ctx=ctx, target=target)

        # Get analytic gradients.
        _, grads = intrp.evaluate(bwd_func)(*inputs)
        grads = [grad.asnumpy().astype("float64") for grad in grads]

        # Get numeric gradients for each dimension of each param, using two-sided approximation.
        approx_grads = []
        for x in inputs:
            approx_grad = np.zeros(x.shape)
            for i in np.ndindex(*x.shape):
                x_i = x[i]
                x[i] = x_i + eps
                fwd_plus = intrp.evaluate(fwd_func)(
                    *inputs).asnumpy().astype("float64")
                x[i] = x_i - eps
                fwd_minus = intrp.evaluate(fwd_func)(
                    *inputs).asnumpy().astype("float64")
                x[i] = x_i
                approx_grad[i] = np.sum((fwd_plus - fwd_minus) / (2 * eps))
            approx_grads.append(approx_grad)

        # Compare gradients by checking that relative difference is below tolerance.
        for grad, approx_grad in zip(grads, approx_grads):
            np.testing.assert_allclose(grad, approx_grad, atol=atol, rtol=rtol)
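The two-sided approximation described in the docstring above, written as a standalone numpy sketch for a scalar-valued function f of an array (no TVM required; names are illustrative):

import numpy as np

def numeric_grad(f, x, eps=1e-6):
    # Central-difference estimate of df/dx, element by element.
    grad = np.zeros_like(x, dtype="float64")
    for i in np.ndindex(*x.shape):
        orig = x[i]
        x[i] = orig + eps
        f_plus = f(x)
        x[i] = orig - eps
        f_minus = f(x)
        x[i] = orig
        grad[i] = (f_plus - f_minus) / (2 * eps)
    return grad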
Example #41
def _calculate_mobility(
    amset_data: AmsetData,
    rate_idx: Union[int, List[int], np.ndarray],
    pbar_label: str = "mobility",
):
    if isinstance(rate_idx, int):
        rate_idx = [rate_idx]

    volume = amset_data.structure.volume
    mobility = np.zeros(amset_data.fermi_levels.shape + (3, 3))

    epsilon, dos = amset_data.tetrahedral_band_structure.get_density_of_states(
        amset_data.dos.energies, sum_spins=True, use_cached_weights=True)

    if pbar_label is not None:
        pbar = get_progress_bar(iterable=list(
            np.ndindex(amset_data.fermi_levels.shape)),
                                desc=pbar_label)
    else:
        pbar = list(np.ndindex(amset_data.fermi_levels.shape))

    for n, t in pbar:
        br = {
            s: np.arange(len(amset_data.energies[s]))
            for s in amset_data.spins
        }
        cb_idx = {s: amset_data.vb_idx[s] + 1 for s in amset_data.spins}

        if amset_data.doping[n] < 0:
            band_idx = {s: br[s][cb_idx[s]:] for s in amset_data.spins}
        else:
            band_idx = {s: br[s][:cb_idx[s]] for s in amset_data.spins}

        lifetimes = {
            s:
            1 / np.sum(amset_data.scattering_rates[s][rate_idx, n, t], axis=0)
            for s in amset_data.spins
        }

        # Nones are required as BoltzTraP2 expects the Fermi and temp as arrays
        fermi = amset_data.fermi_levels[n, t][None]
        temp = amset_data.temperatures[t][None]

        # obtain the Fermi integrals for the temperature and doping
        vvdos = get_transport_dos(
            amset_data.tetrahedral_band_structure,
            amset_data.velocities_product,
            lifetimes,
            amset_data.dos.energies,
            band_idx=band_idx,
        )

        c, l0, l1, l2, lm11 = fermiintegrals(
            epsilon,
            dos,
            vvdos,
            mur=fermi,
            Tr=temp,
            dosweight=amset_data.dos.dos_weight)

        # Compute the Onsager coefficients from Fermi integrals
        sigma, _, _, _ = calc_Onsager_coefficients(l0, l1, l2, fermi, temp,
                                                   volume)

        if amset_data.doping[n] < 0:
            carrier_conc = amset_data.electron_conc[n, t]
        else:
            carrier_conc = amset_data.hole_conc[n, t]

        # don't use c as we don't use the correct DOS each time
        # c = -c[0, ...] / (volume / (Meter / 100.)**3)

        # convert mobility to cm^2/V.s
        uc = 0.01 / (e_si * carrier_conc * (1 / bohr_to_cm)**3)
        mobility[n, t] = sigma[0, ...] * uc

    return mobility
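
The unit-conversion step above implements the standard relation between drift mobility, conductivity and carrier density,

    \mu_{n,t} = \sigma_{n,t} / (n_c e),

where \sigma_{n,t} is the Onsager conductivity tensor from calc_Onsager_coefficients, n_c the electron or hole concentration at that doping and temperature, and e the elementary charge; the factor `uc` bundles this division together with the conversion of the result into cm^2/(V s). The exact prefactors (0.01 and bohr_to_cm) follow the unit conventions used internally by AMSET and are taken as given here.
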
Example #42
File: trajectory.py Project: zklaus/iris
def _nearest_neighbour_indices_ndcoords(cube, sample_points, cache=None):
    """
    Returns the indices to select the data value(s) closest to the given
    coordinate point values.

    'sample_points' is of the form [[coord-or-coord-name, point-value(s)]*].
    The lengths of all the point-values sequences must be equal.

    This function is adapted for points sampling a multi-dimensional coord,
    and can currently only do nearest neighbour interpolation.

    Because this function can be slow for multidimensional coordinates,
    a 'cache' dictionary can be provided by the calling code.

    .. Note::

        If the points are longitudes/latitudes, these are handled correctly as
        points on the sphere, but the values must be in 'degrees'.

    Developer notes:
    A "sample space cube" is made which only has the coords and dims we are
    sampling on.
    We get the nearest neighbour using this sample space cube.

    """
    if sample_points:
        try:
            coord, value = sample_points[0]
        except (KeyError, ValueError):
            emsg = ('Sample points must be a list of '
                    '(coordinate, value) pairs, got {!r}.')
            raise TypeError(emsg.format(sample_points))

    # Convert names to coords in sample_points and reformat sample point values
    # for use in `_cartesian_sample_points()`.
    coord_values = []
    sample_point_coords = []
    sample_point_coord_names = []
    ok_coord_ids = set(map(id, cube.dim_coords + cube.aux_coords))
    for coord, value in sample_points:
        coord = cube.coord(coord)
        if id(coord) not in ok_coord_ids:
            msg = ('Invalid sample coordinate {!r}: derived coordinates are'
                   ' not allowed.'.format(coord.name()))
            raise ValueError(msg)
        sample_point_coords.append(coord)
        sample_point_coord_names.append(coord.name())
        value = np.array(value, ndmin=1)
        coord_values.append(value)

    coord_point_lens = np.array([len(value) for value in coord_values])
    if not np.all(coord_point_lens == coord_point_lens[0]):
        msg = 'All coordinates must have the same number of sample points.'
        raise ValueError(msg)

    coord_values = np.array(coord_values)

    # Which dims are we sampling?
    sample_dims = set()
    for coord in sample_point_coords:
        for dim in cube.coord_dims(coord):
            sample_dims.add(dim)
    sample_dims = sorted(list(sample_dims))

    # Extract a sub cube that lives in just the sampling space.
    sample_space_slice = [0] * cube.ndim
    for sample_dim in sample_dims:
        sample_space_slice[sample_dim] = slice(None, None)
    sample_space_slice = tuple(sample_space_slice)
    sample_space_cube = cube[sample_space_slice]

    # Just the sampling coords.
    for coord in sample_space_cube.coords():
        if not coord.name() in sample_point_coord_names:
            sample_space_cube.remove_coord(coord)

    # Order the sample point coords according to the sample space cube coords.
    sample_space_coord_names = \
        [coord.name() for coord in sample_space_cube.coords()]
    new_order = [
        sample_space_coord_names.index(name)
        for name in sample_point_coord_names
    ]
    coord_values = np.array([coord_values[i] for i in new_order])
    sample_point_coord_names = [sample_point_coord_names[i] for i in new_order]

    sample_space_coords = \
        sample_space_cube.dim_coords + sample_space_cube.aux_coords
    sample_space_coords_and_dims = \
        [(coord, sample_space_cube.coord_dims(coord))
         for coord in sample_space_coords]

    if cache is not None and cube in cache:
        kdtree = cache[cube]
    else:
        # Create a "sample space position" for each
        # `datum.sample_space_data_positions[coord_index][datum_index]`.
        sample_space_data_positions = \
            np.empty((len(sample_space_coords_and_dims),
                      sample_space_cube.data.size),
                     dtype=float)
        for d, ndi in enumerate(np.ndindex(sample_space_cube.data.shape)):
            for c, (coord, coord_dims) in \
                    enumerate(sample_space_coords_and_dims):
                # Index of this datum along this coordinate (could be nD).
                if coord_dims:
                    keys = tuple(ndi[ind] for ind in coord_dims)
                else:
                    keys = slice(None, None)
                # Position of this datum along this coordinate.
                sample_space_data_positions[c][d] = coord.points[keys]

        # Convert to cartesian coordinates. Flatten for kdtree compatibility.
        cartesian_space_data_coords = \
            _cartesian_sample_points(sample_space_data_positions,
                                     sample_point_coord_names)

        # Create a kdtree for the nearest-distance lookup to these 3d points.
        kdtree = cKDTree(cartesian_space_data_coords)
        # This can find the nearest datum point to any given target point,
        # which is the goal of this function.

    # Update cache.
    if cache is not None:
        cache[cube] = kdtree

    # Convert the sample points to cartesian (3d) coords.
    # If there is no latlon within the coordinate there will be no change.
    # Otherwise, geographic latlon is replaced with cartesian xyz.
    cartesian_sample_points = _cartesian_sample_points(
        coord_values, sample_point_coord_names)

    # Use kdtree to get the nearest sourcepoint index for each target point.
    _, datum_index_lists = kdtree.query(cartesian_sample_points)

    # Convert flat indices back into multidimensional sample-space indices.
    sample_space_dimension_indices = np.unravel_index(
        datum_index_lists, sample_space_cube.data.shape)
    # Convert this from "pointwise list of index arrays for each dimension",
    # to "list of cube indices for each point".
    sample_space_ndis = np.array(sample_space_dimension_indices).transpose()

    # For the returned result, we must convert these indices into the source
    # (sample-space) cube, to equivalent indices into the target 'cube'.

    # Make a result array: (cube.ndim * <index>), per sample point.
    n_points = coord_values.shape[-1]
    main_cube_slices = np.empty((n_points, cube.ndim), dtype=object)
    # Initialise so all unused indices are ":".
    main_cube_slices[:] = slice(None)

    # Move result indices according to the source (sample) and target (cube)
    # dimension mappings.
    for sample_coord, sample_coord_dims in sample_space_coords_and_dims:
        # Find the coord in the main cube
        main_coord = cube.coord(sample_coord.name())
        main_coord_dims = cube.coord_dims(main_coord)
        # Fill nearest-point data indices for each coord dimension.
        for sample_i, main_i in zip(sample_coord_dims, main_coord_dims):
            main_cube_slices[:, main_i] = sample_space_ndis[:, sample_i]

    # Return as a list of **tuples** : required for correct indexing usage.
    result = [tuple(inds) for inds in main_cube_slices]
    return result
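
The kd-tree machinery above follows the usual SciPy pattern: build a cKDTree on the flattened source positions, query it with the target points, and convert the returned flat indices back into multidimensional ones. A minimal sketch with synthetic 3-D points (the names below are illustrative, not part of the Iris API):

import numpy as np
from scipy.spatial import cKDTree

grid_shape = (4, 5)
rng = np.random.default_rng(0)
source_xyz = rng.normal(size=(np.prod(grid_shape), 3))   # one 3-D position per grid cell
targets = rng.normal(size=(7, 3))                        # points to look up

tree = cKDTree(source_xyz)
_, flat_idx = tree.query(targets)                        # nearest source index per target
multi_idx = np.array(np.unravel_index(flat_idx, grid_shape)).T   # (7, 2) grid indices
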
Example #43
def voxel_down_sample_random(pc, voxel_grid_size=0.2, n_points=35_000):

    # get points
    points = pc[:, :3]
    colors = np.ones_like(points)

    # get boundings of points
    max_ = np.max(points, axis=0)
    min_ = np.min(points, axis=0)
    # move bounding box anchor to origin
    bbox = max_ - min_
    # compute number of voxels in each dimension
    n_voxels = np.ceil(bbox/voxel_grid_size).astype(np.int32)

    voxel_points = []
    # loop over all voxels
    for index in tqdm(np.ndindex(*n_voxels), total=np.prod(n_voxels)):
        # build anchors of current voxels
        anchorA = np.asarray(index) * voxel_grid_size + min_
        anchorB = anchorA + voxel_grid_size
        # get points in current voxel
        point_idx = get_points_in_bbox(points, anchorA, anchorB)
        if len(point_idx) > 0:
            voxel_points.append(point_idx)

    sampled_point_idx = []
    # compute weight of first voxel
    weights = np.asarray([len(idx) / points.shape[0] for idx in voxel_points])
    # weights = 1 - np.asarray([len(idx) / points.shape[0] for idx in voxel_points])
    # weight = weights[0] / np.sum(weights)
    # sample random points from each voxel
    for i, idx in enumerate(voxel_points):
        # get number of points to sample from current voxel
        n_points_from_current = min(ceil(weights[i] * n_points), len(idx))
        # sample points random
        sampled_point_idx += sample(list(idx), n_points_from_current)

        # if i+1 < len(voxel_points):
        #     # update weight
        #     weights = 1 - np.asarray([len(idx) / (points.shape[0] - len(sampled_point_idx)) for idx in voxel_points[i+1:]])
        #     weight = weights[0] / np.sum(weights)

        # color points of current voxel
        colors[idx] = np.random.uniform(0, 1, size=3)

    print(len(sampled_point_idx), n_points, n_voxels)

    # get sampled points
    sampled_point_idx = sample(sampled_point_idx, n_points)
    sampled_points = points[sampled_point_idx, :]
    sampled_colors = colors[sampled_point_idx, :]

    random_point_idx = sample(range(points.shape[0]), n_points)
    random_points = points[random_point_idx, :]
    random_colors = colors[random_point_idx, :]

    # visualize
    vis = Visualizer(background_color=(1, 1, 1))
    vis.add_by_features(points, colors)
    # vis.add_by_features(random_points, random_colors)
    vis.add_by_features(sampled_points, sampled_colors)
    vis.run()
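
`get_points_in_bbox` is not shown in this snippet; a plausible implementation, assumed here rather than taken from the original project, is a simple axis-aligned bounding-box mask returning the indices of the points inside the voxel:

import numpy as np

def get_points_in_bbox(points, anchor_a, anchor_b):
    # indices of points with anchor_a <= p < anchor_b in every dimension (assumed semantics)
    mask = np.all((points >= anchor_a) & (points < anchor_b), axis=1)
    return np.flatnonzero(mask)
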
Example #44
File: psf.py Project: nregnault/batoid
def huygensPSF(optic, theta_x=None, theta_y=None, wavelength=None, nx=None,
               dx=None, dy=None, nxOut=None):
    """Compute a PSF via the Huygens construction.

    Parameters
    ----------
    optic : batoid.Optic
        Optical system
    theta_x, theta_y : float, optional
        Field angle in radians (gnomic tangent plane projection)
    wavelength : float, optional
        Wavelength in meters
    nx : int, optional
        Size of ray grid to use.
    dx, dy : float, optional
        Lattice scales to use for PSF evaluation locations.  Default, use fftPSF lattice.

    Returns
    -------
    psf : batoid.Lattice
        The PSF.

    Notes
    -----
    The Huygens construction is to evaluate the PSF as

    I(x) \propto \left| \sum_u \exp(i \phi(u)) \exp(i k(u) \cdot x) \right|^2

    The u are assumed to uniformly sample the entrance pupil, but not include any rays that get
    vignetted before they reach the focal plane.  The phis are the phases of the exit rays evaluated
    at a single arbitrary time.  The k(u) indicates the conversion of the uniform entrance pupil
    samples into nearly (though not exactly) uniform samples in k-space of the output rays.

    The output locations where the PSF is evaluated are governed by dx, dy and nx.  If dx and dy are
    None, then the same lattice as in fftPSF will be used.  If dx and dy are scalars, then a lattice
    with primitive vectors [dx, 0] and [0, dy] will be used.  If dx and dy are 2-vectors, then those
    will be the primitive vectors of the output lattice.
    """
    from numbers import Real

    if dx is None:
        primitiveU = np.array([[optic.pupilSize/nx,0], [0, optic.pupilSize/nx]])
        primitiveK = dkdu(optic, theta_x, theta_y, wavelength).dot(primitiveU)
        pad_factor = 2
        primitiveX = np.vstack(
            reciprocalLatticeVectors(primitiveK[0], primitiveK[1], pad_factor*nx)
        )
    elif isinstance(dx, Real):
        if dy is None:
            dy = dx
        primitiveX = np.vstack([[dx, 0], [0, dy]])
        pad_factor = 1
    else:
        primitiveX = np.vstack([dx, dy])
        pad_factor = 1

    if nxOut is None:
        nxOut = nx

    dirCos = gnomicToDirCos(theta_x, theta_y)

    rays = batoid.rayGrid(optic.dist, optic.pupilSize,
        dirCos[0], dirCos[1], -dirCos[2],
        nx, wavelength=wavelength, flux=1, medium=optic.inMedium)

    amplitudes = np.zeros((nxOut*pad_factor, nxOut*pad_factor), dtype=np.complex128)
    out = batoid.Lattice(np.zeros((nxOut*pad_factor, nxOut*pad_factor), dtype=float), primitiveX)

    rays, outCoordSys = optic.traceInPlace(rays)
    rays.trimVignettedInPlace()
    # Need transpose to conform to numpy [y,x] ordering convention
    xs = out.coords[..., 0].T + np.mean(rays.x)
    ys = out.coords[..., 1].T + np.mean(rays.y)
    zs = np.zeros_like(xs)

    points = np.concatenate([aux[..., None] for aux in (xs, ys, zs)], axis=-1)
    time = rays[0].t
    for idx in np.ndindex(amplitudes.shape):
        amplitudes[idx] = rays.sumAmplitude(points[idx], time)
    return batoid.Lattice(np.abs(amplitudes)**2, primitiveX)
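
The final double loop is a coherent sum of ray amplitudes at each lattice point, with the intensity taken as the squared modulus. A minimal NumPy sketch of the same construction, with random phases and wavevectors standing in for the traced rays:

import numpy as np

rng = np.random.default_rng(0)
phases = rng.uniform(0, 2 * np.pi, size=100)     # phi(u), one phase per ray (made up)
kvecs = rng.normal(size=(100, 2))                # k(u), transverse wavevector per ray (made up)

# small output lattice of evaluation points
xs, ys = np.meshgrid(np.linspace(-1, 1, 32), np.linspace(-1, 1, 32))
points = np.stack([xs, ys], axis=-1)             # shape (32, 32, 2)

# coherent sum over rays at every point; intensity is the squared modulus
phase_term = phases[:, None, None] + np.einsum('uk,ijk->uij', kvecs, points)
psf = np.abs(np.exp(1j * phase_term).sum(axis=0)) ** 2
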
Example #45
    def __init__(self,
                 economy: 'Economy',
                 sigma: Optional[Any] = None,
                 pi: Optional[Any] = None,
                 rho: Optional[Any] = None,
                 beta: Optional[Any] = None,
                 gamma: Optional[Any] = None,
                 sigma_bounds: Optional[Tuple[Any, Any]] = None,
                 pi_bounds: Optional[Tuple[Any, Any]] = None,
                 rho_bounds: Optional[Tuple[Any, Any]] = None,
                 beta_bounds: Optional[Tuple[Any, Any]] = None,
                 gamma_bounds: Optional[Tuple[Any, Any]] = None,
                 bounded: bool = False,
                 allow_linear_nans: bool = False) -> None:
        """Coerce parameters into usable formats before storing information about fixed (equal bounds) and unfixed
        (unequal bounds) elements of sigma, pi, rho, beta, and gamma. Also store information about eliminated
        (concentrated out) parameters in beta and gamma. For unfixed parameters, verify that values have been chosen
        such that choice probability computation is unlikely to overflow. If bounds are unspecified, determine
        reasonable bounds as well. If allow_linear_nans is True, allow null linear parameters in order to denote those
        parameters that will be concentrated out.
        """

        # store labels
        self.sigma_labels = [str(f) for f in economy._X2_formulations]
        self.pi_labels = [str(f) for f in economy._demographics_formulations]
        self.rho_labels = [str(i) for i in economy.unique_nesting_ids]
        self.beta_labels = [str(f) for f in economy._X1_formulations]
        self.gamma_labels = [str(f) for f in economy._X3_formulations]

        # validate and store parameters
        self.sigma = self.initialize_matrix("sigma", "X2 was formulated",
                                            sigma, [(economy.K2, economy.K2)])
        self.pi = self.initialize_matrix("pi", "demographics were formulated",
                                         pi, [(economy.K2, economy.D)])
        self.rho = self.initialize_matrix("rho", "nesting IDs were specified",
                                          rho, [(economy.H, 1), (1, 1)])
        self.beta = self.initialize_matrix("beta", "X1 was formulated", beta,
                                           [(economy.K1, 1)],
                                           allow_linear_nans)
        self.gamma = self.initialize_matrix("gamma", "X3 was formulated",
                                            gamma, [(economy.K3, 1)],
                                            allow_linear_nans)

        # fill the lower triangle of sigma with zeros
        self.sigma[np.tril_indices(economy.K2, -1)] = 0

        # identify the index of nonzero columns in sigma
        self.nonzero_sigma_index = np.sum(self.sigma, axis=0) > 0

        # identify the index of alpha in beta
        self.alpha_index = np.zeros_like(self.beta, bool)
        for k, formulation in enumerate(economy._X1_formulations):
            if 'prices' in formulation.names:
                self.alpha_index[k] = True

        # identify eliminated indexes
        self.eliminated_alpha_index = np.isnan(self.beta) & self.alpha_index
        self.eliminated_beta_index = np.isnan(self.beta)
        self.eliminated_gamma_index = np.isnan(self.gamma)

        # there should be at least as many integration node columns as nonzero sigma columns
        if economy.agents.nodes.shape[1] < self.nonzero_sigma_index.sum():
            raise ValueError(
                f"The number of columns of integration nodes, {economy.agents.nodes.shape[1]}, is smaller than the "
                f"number of columns in sigma with at least one nonzero parameter, {self.nonzero_sigma_index.sum()}."
            )

        # alpha cannot be concentrated out if there's a supply side
        if economy.K3 > 0:
            for formulation, eliminated in zip(
                    economy._X1_formulations,
                    self.eliminated_beta_index.flatten()):
                if 'prices' in formulation.names and eliminated:
                    raise ValueError(
                        f"A supply side was specified, so alpha should not be concentrated out. That is, initial "
                        f"values should be specified for all parameters in beta on X1 characteristics involving prices."
                    )

        # validate and store parameter bounds
        self.sigma_bounds = self.initialize_bounds("sigma", self.sigma,
                                                   sigma_bounds, bounded)
        self.pi_bounds = self.initialize_bounds("pi", self.pi, pi_bounds,
                                                bounded)
        self.rho_bounds = self.initialize_bounds("rho", self.rho, rho_bounds,
                                                 bounded)
        self.beta_bounds = self.initialize_bounds("beta", self.beta,
                                                  beta_bounds, bounded)
        self.gamma_bounds = self.initialize_bounds("gamma", self.gamma,
                                                   gamma_bounds, bounded)

        # identify the type of rho parameter that has been specified (either for all groups or just one)
        rho_type = OneGroupRhoParameter if self.rho.size > 1 else AllGroupsRhoParameter

        # store information about fixed and unfixed parameters
        self.fixed: List[Parameter] = []
        self.unfixed: List[Parameter] = []
        self.eliminated: List[Parameter] = []
        self.store(SigmaParameter, zip(*np.triu_indices_from(self.sigma)),
                   self.sigma_bounds)
        self.store(PiParameter, np.ndindex(self.pi.shape), self.pi_bounds)
        self.store(rho_type, np.ndindex(self.rho.shape), self.rho_bounds)
        self.store(BetaParameter, np.ndindex(self.beta.shape),
                   self.beta_bounds, self.eliminated_beta_index)
        self.store(GammaParameter, np.ndindex(self.gamma.shape),
                   self.gamma_bounds, self.eliminated_gamma_index)

        # count the number of unfixed parameters
        self.P = len(self.unfixed)

        # skip overflow checks and bound computation if parameters aren't bounded
        if not bounded:
            return

        # identify which parameters need default bounds
        unbounded_parameters = []
        bounds_mapping = {
            SigmaParameter: sigma_bounds,
            PiParameter: pi_bounds,
            RhoParameter: rho_bounds
        }
        for parameter in self.unfixed:
            for parameter_type, bounds in bounds_mapping.items():
                if isinstance(parameter, parameter_type):
                    if bounds is None:
                        unbounded_parameters.append(parameter)
                    break

        # compute default bounds for unbounded parameters such that conditional on reasonable values for all other
        #   parameters, choice probability computation is unlikely to require overflow safety precautions
        mu_norm = self.compute_mu_norm(economy)
        mu_max = np.log(np.finfo(np.float64).max)
        for parameter in unbounded_parameters:
            location = parameter.location
            if isinstance(parameter, NonlinearCoefficient):
                v_norm = np.abs(
                    parameter.get_agent_characteristic(economy)).max()
                x_norm = np.abs(
                    parameter.get_product_characteristic(economy)).max()
                additional_mu_norm = self.compute_mu_norm(
                    economy, eliminate_parameter=parameter)
                with np.errstate(divide='ignore'):
                    bound = self.normalize_default_bound(
                        max(0, mu_max - additional_mu_norm) / v_norm / x_norm)
                if isinstance(parameter, SigmaParameter):
                    lb = min(self.sigma[location],
                             -bound if location[0] != location[1] else 0)
                    ub = max(self.sigma[location], +bound)
                    self.sigma_bounds[0][location], self.sigma_bounds[1][
                        location] = lb, ub
                else:
                    assert isinstance(parameter, PiParameter)
                    lb = min(self.pi[location], -bound)
                    ub = max(self.pi[location], +bound)
                    self.pi_bounds[0][location], self.pi_bounds[1][
                        location] = lb, ub
            else:
                assert isinstance(parameter, RhoParameter)
                lb = min(self.rho[location], 0)
                ub = max(
                    self.rho[location],
                    self.normalize_default_bound(1 - min(1, mu_norm / mu_max)))
                self.rho_bounds[0][location], self.rho_bounds[1][
                    location] = lb, ub
Example #46
# (discretized) gradient of the reconstruction. For each of the dimensions
# we create two functionals and two operators.

# Start with empty lists ...
tv_functionals = []
tv_operators = []
tv_stepsizes = []

# ... and for each dimension of the reconstruction space ...
reco_shape = reco_space.shape
reco_dim = len(reco_shape)
for dim in range(reco_dim):
    # ... add two operators taking only the even and odd elements,
    # respectively, in that dimension.
    partial_der = odl.PartialDerivative(reco_space, dim, pad_mode='order0')
    all_points = list(np.ndindex(reco_shape))
    even_pts = [list(p) for p in all_points if p[dim] % 2 == 0]
    even_pts = np.array(even_pts).T.tolist()
    odd_pts = [list(p) for p in all_points if p[dim] % 2 == 1]
    odd_pts = np.array(odd_pts).T.tolist()
    op1 = reco_space.cell_sides[dim] * odl.SamplingOperator(
        reco_space, even_pts) * partial_der
    op2 = reco_space.cell_sides[dim] * odl.SamplingOperator(
        reco_space, odd_pts) * partial_der
    tv_functionals += [
        odl.solvers.L1Norm(op1.range),
        odl.solvers.L1Norm(op2.range)
    ]
    tv_operators += [op1, op2]
    tv_stepsizes += [0.5 / reco_shape[dim], 0.5 / reco_shape[dim]]
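
The even/odd selection above simply partitions the grid indices by parity along the chosen axis; for a hypothetical 3x2 reconstruction shape and dim = 0 the split looks like this:

import numpy as np

reco_shape = (3, 2)   # hypothetical shape
dim = 0
all_points = list(np.ndindex(reco_shape))
even_pts = [list(p) for p in all_points if p[dim] % 2 == 0]
odd_pts = [list(p) for p in all_points if p[dim] % 2 == 1]
print(even_pts)   # [[0, 0], [0, 1], [2, 0], [2, 1]]
print(odd_pts)    # [[1, 0], [1, 1]]
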
                              pymc_medians['ampdy0'],
                              pymc_medians['center0'],
                              pymc_medians['centerdx0'],
                              pymc_medians['centerdy0'],
                              pymc_medians['sigma0'],
                              pymc_medians['sigmadx0'],
                              pymc_medians['sigmady0'],
                              npix=npix,
                              force_positive=False)

fitcube_pymc = pymc_medians['on0'] * fitcube_pymc

pl.figure(1).clf()
fig, axes = pl.subplots(npix, npix, sharex=True, sharey=True, num=1)

for ii, ((yy, xx), ax) in enumerate(zip(np.ndindex((npix, npix)),
                                        axes.ravel())):
    ax.plot(model[:, yy, xx],
            'k-',
            alpha=0.25,
            zorder=-10,
            linewidth=3,
            drawstyle='steps-mid')
    ax.plot(model_with_noise[:, yy, xx],
            'k-',
            zorder=-5,
            linewidth=1,
            drawstyle='steps-mid')
    ax.plot(fitcube[:, yy, xx],
            'b--',
            zorder=0,
Example #48
def project(cube, target_proj, nx=None, ny=None):
    """
    Nearest neighbour regrid to a specified target projection.

    Return a new cube that is the result of projecting a cube with 1 or 2
    dimensional latitude-longitude coordinates from its coordinate system into
    a specified projection e.g. Robinson or Polar Stereographic.
    This function is intended to be used in cases where the cube's coordinates
    prevent one from directly visualising the data, e.g. when the longitude
    and latitude are two dimensional and do not make up a regular grid.

    Args:
        * cube
            An instance of :class:`iris.cube.Cube`.
        * target_proj
            An instance of the Cartopy Projection class, or an instance of
            :class:`iris.coord_systems.CoordSystem` from which a projection
            will be obtained.
    Kwargs:
        * nx
            Desired number of sample points in the x direction for a domain
            covering the globe.
        * ny
            Desired number of sample points in the y direction for a domain
            covering the globe.

    Returns:
        An instance of :class:`iris.cube.Cube` and a list describing the
        extent of the projection.

    .. note::

        This function assumes global data and will if necessary extrapolate
        beyond the geographical extent of the source cube using a nearest
        neighbour approach. nx and ny then include those points which are
        outside of the target projection.

    .. note::

        Masked arrays are handled by passing their masked status to the
        resulting nearest neighbour values.  If masked, the value in the
        resulting cube is set to 0.

    .. warning::

        This function uses a nearest neighbour approach rather than any form
        of linear/non-linear interpolation to determine the data value of each
        cell in the resulting cube. Consequently it may have an adverse effect
        on the statistics of the data e.g. the mean and standard deviation
        will not be preserved.

    """
    try:
        lat_coord, lon_coord = _get_lat_lon_coords(cube)
    except IndexError:
        raise ValueError('Cannot get latitude/longitude '
                         'coordinates from cube {!r}.'.format(cube.name()))

    if lat_coord.coord_system != lon_coord.coord_system:
        raise ValueError('latitude and longitude coords appear to have '
                         'different coordinate systems.')

    if lon_coord.units != 'degrees':
        lon_coord = lon_coord.copy()
        lon_coord.convert_units('degrees')
    if lat_coord.units != 'degrees':
        lat_coord = lat_coord.copy()
        lat_coord.convert_units('degrees')

    # Determine source coordinate system
    if lat_coord.coord_system is None:
        # Assume WGS84 latlon if unspecified
        warnings.warn('Coordinate system of latitude and longitude '
                      'coordinates is not specified. Assuming WGS84 Geodetic.')
        orig_cs = iris.coord_systems.GeogCS(semi_major_axis=6378137.0,
                                            inverse_flattening=298.257223563)
    else:
        orig_cs = lat_coord.coord_system

    # Convert to cartopy crs
    source_cs = orig_cs.as_cartopy_crs()

    # Obtain coordinate arrays (ignoring bounds) and convert to 2d
    # if not already.
    source_x = lon_coord.points
    source_y = lat_coord.points
    if source_x.ndim != 2 or source_y.ndim != 2:
        source_x, source_y = np.meshgrid(source_x, source_y)

    # Calculate target grid
    target_cs = None
    if isinstance(target_proj, iris.coord_systems.CoordSystem):
        target_cs = target_proj
        target_proj = target_proj.as_cartopy_projection()

    # Resolution of new grid
    if nx is None:
        nx = source_x.shape[1]
    if ny is None:
        ny = source_x.shape[0]

    target_x, target_y, extent = cartopy.img_transform.mesh_projection(
        target_proj, nx, ny)

    # Determine dimension mappings - expect either 1d or 2d
    if lat_coord.ndim != lon_coord.ndim:
        raise ValueError("The latitude and longitude coordinates have "
                         "different dimensionality.")

    latlon_ndim = lat_coord.ndim
    lon_dims = cube.coord_dims(lon_coord)
    lat_dims = cube.coord_dims(lat_coord)

    if latlon_ndim == 1:
        xdim = lon_dims[0]
        ydim = lat_dims[0]
    elif latlon_ndim == 2:
        if lon_dims != lat_dims:
            raise ValueError("The 2d latitude and longitude coordinates "
                             "correspond to different dimensions.")
        # If coords are 2d assume that grid is ordered such that x corresponds
        # to the last dimension (shortest stride).
        xdim = lon_dims[1]
        ydim = lon_dims[0]
    else:
        raise ValueError('Expected the latitude and longitude coordinates '
                         'to have 1 or 2 dimensions, got {} and '
                         '{}.'.format(lat_coord.ndim, lon_coord.ndim))

    # Create array to store regridded data
    new_shape = list(cube.shape)
    new_shape[xdim] = nx
    new_shape[ydim] = ny
    new_data = ma.zeros(new_shape, cube.data.dtype)

    # Create iterators to step through cube data in lat long slices
    new_shape[xdim] = 1
    new_shape[ydim] = 1
    index_it = np.ndindex(*new_shape)
    if lat_coord.ndim == 1 and lon_coord.ndim == 1:
        slice_it = cube.slices([lat_coord, lon_coord])
    elif lat_coord.ndim == 2 and lon_coord.ndim == 2:
        slice_it = cube.slices(lat_coord)
    else:
        raise ValueError('Expected the latitude and longitude coordinates '
                         'to have 1 or 2 dimensions, got {} and '
                         '{}.'.format(lat_coord.ndim, lon_coord.ndim))

#    # Mask out points outside of extent in source_cs - disabled until
#    # a way to specify global/limited extent is agreed upon and code
#    # is generalised to handle -180 to +180, 0 to 360 and >360 longitudes.
#    source_desired_xy = source_cs.transform_points(target_proj,
#                                                   target_x.flatten(),
#                                                   target_y.flatten())
#    if np.any(source_x < 0.0) and np.any(source_x > 180.0):
#        raise ValueError('Unable to handle range of longitude.')
#    # This does not work in all cases e.g. lon > 360
#    if np.any(source_x > 180.0):
#        source_desired_x = (source_desired_xy[:, 0].reshape(ny, nx) +
#                            360.0) % 360.0
#    else:
#        source_desired_x = source_desired_xy[:, 0].reshape(ny, nx)
#    source_desired_y = source_desired_xy[:, 1].reshape(ny, nx)
#    outof_extent_points = ((source_desired_x < source_x.min()) |
#                           (source_desired_x > source_x.max()) |
#                           (source_desired_y < source_y.min()) |
#                           (source_desired_y > source_y.max()))
#    # Make array a mask by default (rather than a single bool) to allow mask
#    # to be assigned to slices.
#    new_data.mask = np.zeros(new_shape)

    # Step through cube data, regrid onto desired projection and insert
    # results in the new_data array
    for index, ll_slice in zip(index_it, slice_it):
        # Regrid source data onto target grid
        index = list(index)
        index[xdim] = slice(None, None)
        index[ydim] = slice(None, None)
        new_data[index] = cartopy.img_transform.regrid(ll_slice.data, source_x,
                                                       source_y, source_cs,
                                                       target_proj, target_x,
                                                       target_y)

#    # Mask out points beyond extent
#    new_data[index].mask[outof_extent_points] = True

    # Remove mask if it is unnecessary
    if not np.any(new_data.mask):
        new_data = new_data.data

    # Create new cube
    new_cube = iris.cube.Cube(new_data)

    # Add new grid coords
    x_coord = iris.coords.DimCoord(target_x[0, :],
                                   'projection_x_coordinate',
                                   coord_system=copy.copy(target_cs))
    y_coord = iris.coords.DimCoord(target_y[:, 0],
                                   'projection_y_coordinate',
                                   coord_system=copy.copy(target_cs))

    new_cube.add_dim_coord(x_coord, xdim)
    new_cube.add_dim_coord(y_coord, ydim)

    # Add resampled lat/lon in original coord system
    source_desired_xy = source_cs.transform_points(target_proj,
                                                   target_x.flatten(),
                                                   target_y.flatten())
    new_lon_points = source_desired_xy[:, 0].reshape(ny, nx)
    new_lat_points = source_desired_xy[:, 1].reshape(ny, nx)
    new_lon_coord = iris.coords.AuxCoord(new_lon_points,
                                         standard_name='longitude',
                                         units='degrees',
                                         coord_system=orig_cs)
    new_lat_coord = iris.coords.AuxCoord(new_lat_points,
                                         standard_name='latitude',
                                         units='degrees',
                                         coord_system=orig_cs)
    new_cube.add_aux_coord(new_lon_coord, [ydim, xdim])
    new_cube.add_aux_coord(new_lat_coord, [ydim, xdim])

    coords_to_ignore = set()
    coords_to_ignore.update(cube.coords(contains_dimension=xdim))
    coords_to_ignore.update(cube.coords(contains_dimension=ydim))
    for coord in cube.dim_coords:
        if coord not in coords_to_ignore:
            new_cube.add_dim_coord(coord.copy(), cube.coord_dims(coord))
    for coord in cube.aux_coords:
        if coord not in coords_to_ignore:
            new_cube.add_aux_coord(coord.copy(), cube.coord_dims(coord))
    discarded_coords = coords_to_ignore.difference([lat_coord, lon_coord])
    if discarded_coords:
        warnings.warn('Discarding coordinates that share dimensions with '
                      '{} and {}: {}'.format(
                          lat_coord.name(), lon_coord.name(),
                          [coord.name() for coord in discarded_coords]))

    # TODO handle derived coords/aux_factories

    # Copy metadata across
    new_cube.metadata = cube.metadata

    return new_cube, extent
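
A hedged usage sketch for `project`, assuming cartopy is installed and a 2-D lat/lon cube is available, e.g. from the optional iris-sample-data package:

import iris
import cartopy.crs as ccrs

cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))   # requires iris-sample-data
robinson_cube, extent = project(cube, ccrs.Robinson(), nx=400, ny=200)
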
Example #49
def sparse_dot_product_forward(vector,
                               ndim,
                               mat_shape,
                               T,
                               loss,
                               window,
                               params_grad,
                               param_der,
                               n_jobs=10):
    mat_len = int(np.prod(mat_shape))
    # assert ndim * mat_len == len(vector), "not correct shape of vector"

    derivative_func = import_func(**param_der)

    deltas = list(
        itertools.product(range(-window, window + 1), repeat=vector.ndim - 2))
    mn, mx = (0, ) * ndim, vector.shape[2:]

    data, rows, cols = [], [], []

    for ax in range(ndim):
        result = Parallel(n_jobs=n_jobs, temp_folder='~/JOBLIB_TMP_FOLDER/')(
            delayed(one_der)(i, ax, T, mat_shape, mat_len, deltas, mn, mx,
                             derivative_func, vector, loss, params_grad)
            for i in tqdm(np.ndindex(*vector.shape[2:]), desc='dJ_der'))

        loc_data, loc_rows, loc_cols = map(np.concatenate, zip(*result))
        data.extend(loc_data)
        rows.extend(loc_rows)
        cols.extend(loc_cols)

    gc.collect()
    for i in range(min(mat_shape)):
        I = matrix_to_vec_indices(i, mat_shape)
        for ax in itertools.combinations(range(ndim), 2):
            der = derivative_func(i=(
                T,
                ax[0],
            ) + (i, ) * ndim,
                                  j=(
                                      T,
                                      ax[1],
                                  ) + (i, ) * ndim,
                                  vf=np.copy(vector),
                                  loss=loss,
                                  **params_grad)
            # if np.abs(der) > 1e-15:

            i_loc = I + ax[0] * mat_len
            j_loc = I + ax[1] * mat_len

            data.extend([der, der])
            rows.extend([i_loc, j_loc])
            cols.extend([j_loc, i_loc])

    shape = (ndim * mat_len, ndim * mat_len)
    result = coo_matrix((data, (rows, cols)), shape=shape)

    regul = np.real(ifftn(params_grad['reg'].regularizer.operator))
    r = np.arange(int(ndim * mat_len))

    reg = coo_matrix((np.repeat(regul.reshape(-1), ndim), (r, r)), shape=shape)
    try:
        return inv(result + reg)
    except:
        regul2 = coo_matrix((np.repeat(1e-8, len(r)), (r, r)), shape=shape)
        return inv(result + reg + regul2)
Example #50
def readHdf2Dat(tag, fin, outprefix, efunc, paramag=False, niter=-1, iiw=-1):
    """  Function to convert w2dynamics hdf5-file into data files
        accepted by the maxent program
        the (historic) file format consists of two comment lines,
        which are ignored, followed by data lines
         
        the input parameters follow as:
         tag: gtau or giw, determines what quantity is to be read out
         fin: hdf5 input file name
         outprefix: prefix for output files
         efunc: adjust the error manually (expects python expression)
         paramag: paramagnetic symmetrization
         niter: w2dyn iteration, if -1 then "dmft-last" is selected
         iiw: limit the number of iw frequencies on giw, symmetrically
              around 0, if iww=-1 all frequencies are used
   """
    hf = hdf5.File(fin, "r")

    file_version = tuple(hf.attrs["outfile-version"])
    beta = qttys.MetaQttyContainer("config", hf, file_version) \
             .select(qttys.SelectorPool(qttys.Selector("*beta"))) \
             .values()

    #we refer to tau or iw array as ximag in the following
    if tag == "gtau":
        ximag = hf[("axes", ".axes")[file_version[0] - 1]]["taubin"].value

    if tag == "giw-meas":
        ximag = hf[("axes", ".axes")[file_version[0] - 1]]["iw"].value

    #selecting the iteration
    if (niter == -1):
        diter = "dmft-last"
        try:
            hf[diter]
        except KeyError:
            diter = "stat-last"
    else:
        diter = "dmft-" + "%03d" % niter
        try:
            hf[diter]
        except KeyError:
            diter = "stat-" + "%03d" % niter

    outlist = []

    for iineq, (all_gtaumean, all_gtauerr) in \
       enumerate(qttys.ineq_quantity(hf[diter], tag)):

        if paramag:
            all_gtaumean = all_gtaumean.mean(1)[:, None]
            all_gtauerr = np.sqrt((all_gtauerr**2).sum(1)[:, None])

        for iband, ispin in np.ndindex(all_gtaumean.shape[:2]):
            suffix = diter + "_" + str(iineq) + "_" + str(iband) + "_" + str(
                ispin) + ".dat"

            outlist.append(suffix)

            fdat = file(outprefix + suffix, "w")
            print >> fdat, " "
            print >> fdat, "# beta = %s, iter = %s, ineq = %s, band = %s, spin = %s" % (
                beta[0], niter, iineq, iband, ispin)

            gtaumean = all_gtaumean[iband, ispin]

            # cut the frequency box symmetrically using iiw in case the number
            # of frequencies in giw-meas is too large
            if tag == "giw-meas" and iiw is not None:
                gtaumean = gtaumean[gtaumean.shape[0] // 2 -
                                    iiw // 2:gtaumean.shape[0] // 2 + iiw // 2]
                ximag = ximag[ximag.shape[0] // 2 -
                              iiw // 2:ximag.shape[0] // 2 + iiw // 2]

            gtauerr = all_gtauerr[iband, ispin]
            gtauerr[...] = efunc(ximag, gtaumean, gtauerr)

            for i in xrange(gtaumean.shape[0]):
                if tag == "gtau":
                    print >> fdat, ximag[i], gtaumean[i], gtauerr[i]
                if tag == "giw-meas":
                    print >> fdat, ximag[i], gtaumean[i].real, gtaumean[
                        i].imag, gtauerr[i]
    return outlist
Example #51
    def _run_py(self, q, q_direction=None):
        if self._dynmat.is_nac():
            if q_direction is None:
                fc_nac = self._nac(q)
                d_nac = self._d_nac(q)
            else:
                fc_nac = self._nac(q_direction)
                d_nac = self._d_nac(q_direction)

        fc = self._force_constants
        vecs = self._smallest_vectors
        multiplicity = self._multiplicity
        num_patom = len(self._p2s_map)
        num_satom = len(self._s2p_map)

        if self._derivative_order == 2:
            num_elem = 6
        else:
            num_elem = 3

        ddm = np.zeros((num_elem, 3 * num_patom, 3 * num_patom), dtype=complex)

        for i, j in list(np.ndindex(num_patom, num_patom)):
            s_i = self._p2s_map[i]
            s_j = self._p2s_map[j]
            mass = np.sqrt(self._mass[i] * self._mass[j])
            ddm_local = np.zeros((num_elem, 3, 3), dtype='complex128')

            for k in range(num_satom):
                if s_j != self._s2p_map[k]:
                    continue

                multi = multiplicity[k, i]
                vecs_multi = vecs[k, i, :multi]
                phase_multi = np.exp(
                    [np.vdot(vec, q) * 2j * np.pi for vec in vecs_multi])
                vecs_multi_cart = np.dot(vecs_multi, self._pcell.get_cell())
                coef_order1 = 2j * np.pi * vecs_multi_cart
                if self._derivative_order == 2:
                    coef_order2 = [np.outer(co1, co1) for co1 in coef_order1]
                    coef = np.array([
                        co2.ravel()[[0, 4, 8, 5, 2, 1]] for co2 in coef_order2
                    ])
                else:
                    coef = coef_order1

                if self._dynmat.is_nac():
                    fc_elem = fc[s_i, k] + fc_nac[i, j]
                else:
                    fc_elem = fc[s_i, k]

                for l in range(num_elem):
                    ddm_elem = fc_elem * (coef[:, l] * phase_multi).sum()
                    if (self._dynmat.is_nac()
                            and not self._derivative_order == 2):
                        ddm_elem += d_nac[l, i, j] * phase_multi.sum()

                    ddm_local[l] += ddm_elem / mass / multi

            ddm[:, (i * 3):(i * 3 + 3), (j * 3):(j * 3 + 3)] = ddm_local

        # Impose Hermite condition
        self._ddm = np.array([(ddm[i] + ddm[i].conj().T) / 2
                              for i in range(num_elem)])
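
For orientation, the quantity assembled in this loop is the q-derivative of the dynamical matrix; schematically, with q in reduced coordinates (which is where the 2\pi i factor in coef_order1 comes from),

    \partial D_{j\alpha, j'\beta} / \partial q_\gamma
        \propto \frac{1}{\sqrt{m_j m_{j'}}} \sum_{l'} \Phi_{\alpha\beta}(j0, j'l') \, (2\pi i\, r_\gamma) \, e^{2\pi i q \cdot r},

and the second-derivative branch replaces the single 2\pi i r_\gamma factor by the outer product of these vectors (the coef_order2 case). This is a schematic reading of the code above, not a restatement of the phonopy documentation.
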
def _remove_diags(tensor):
    for kk in np.ndindex(tensor.shape):
        if len(set(kk)) != len(kk):
            tensor[kk] = 0
    return tensor
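
A quick illustration of `_remove_diags`: for a rank-2 array it zeroes exactly the main diagonal, and for higher ranks any entry whose index tuple contains a repeated component:

import numpy as np

t = np.arange(9, dtype=float).reshape(3, 3)
print(_remove_diags(t))
# [[0. 1. 2.]
#  [3. 0. 5.]
#  [6. 7. 0.]]
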
def gen_ndindex(shape):
    for ind in np.ndindex(shape):
        yield ind
    def testTopK(self):

        config = tf.ConfigProto(
            intra_op_parallelism_threads=1,
            inter_op_parallelism_threads=1)

        with self.test_session(config=config) as sess, tf.device("/gpu:0"):

            for shape in shapes:

                topK = shape[-1] // 4 # 25% sparsity

                np.random.seed(int(time()))
                cpuX = np.random.uniform(-1.0, 1.0, shape).astype(np.float32)
                cpuE = np.random.uniform(-1.0, 1.0, shape).astype(np.float32)

                X = tf.placeholder(tf.float32, cpuX.shape)
                E = tf.placeholder(tf.float32, cpuE.shape)

                for mask_dims in (0, 2, 3):

                    if mask_dims == 0:
                        mask = M = m_shape = None
                        feed_dict = { X: cpuX, E: cpuE }

                    else:
                        m_shape = [1 for n in shape]
                        m_shape[-mask_dims:] = shape[-mask_dims:]
                        mask = np.zeros(m_shape, dtype=np.float32)

                        if mask_dims == 2:
                            for y, x in np.ndindex(mask.shape[-2:]):
                                if x <= y: mask[:,:,y,x] = 3.0
                        elif mask_dims == 3:
                            for z, y, x in np.ndindex(mask.shape[-3:]):
                                if x <= y: mask[:,z,y,x] = (z+1)*3.0

                        M = tf.placeholder(tf.float32, mask.shape)
                        feed_dict = { X: cpuX, E: cpuE, M: mask }

                    for dtype in (tf.float32, ):  #tf.float16, tf.bfloat16

                        rtol = 1e-4 if dtype is tf.float32 else 1e-1

                        Y = ew.float_cast(X, dtype=dtype)

                        #Y = trans.masked_top_k_softmax(Y, topK, mask=M, scale=2.0)

                        Y = trans.masked_softmax(Y, mask=M, scale=2.0, bench=bench)

                        Y = ew.float_cast(Y, dtype=tf.float32, dx_dtype=dtype)
                        D = tf.gradients(Y, [X], E)

                        #devY, = sess.run( [Y], feed_dict)
                        devY, (devDX,) = sess.run( [Y, D], feed_dict)
                        #devY, (devDX,), tfY = sess.run( [Y, D, tf.nn.top_k(X, topK)], feed_dict)

                        # gradient_checker tests are insanely slow
                        # if True:
                        #     x = tf.constant(cpuX)
                        #     m = tf.constant(mask)
                        #     y = trans.masked_top_k_softmax(x, topK, mask=m)

                        # error = gradient_checker.compute_gradient_error(x, shape, y, shape) #, extra_feed_dict={ x: cpuX, m: mask }
                        # assert error < 0.01, error

                        if bench == 0:

                            # cpuY  = trans.masked_top_k_softmax_test(cpuX, topK, mask=mask, scale=2.0)
                            # cpuDX = trans.masked_softmax_grad_test(cpuE, cpuY, mask=mask, scale=2.0)

                            cpuY  = trans.masked_softmax_test(cpuX, mask=mask, scale=2.0)
                            cpuDX = trans.masked_softmax_grad_test(cpuE, cpuY, mask=mask, scale=2.0)
                            difY  = np.abs(cpuY -  devY)
                            difDX = np.abs(cpuDX - devDX)
                            cntY  = (difY  > rtol).astype(int).sum() / difY.size
                            cntDX = (difDX > rtol).astype(int).sum() / difDX.size

                            print("%s, shape:%18s, mask:%18s, errY:%.5f, errDX:%.5f" % (dtype.name, str(shape), str(m_shape), cntY, cntDX))

                            if out:
                                np.savetxt( "cpuY.txt",  cpuY.reshape(-1,shape[-1]), fmt="%6.3f")
                                np.savetxt( "devY.txt",  devY.reshape(-1,shape[-1]), fmt="%6.3f")
                                np.savetxt("cpuDX.txt", cpuDX.reshape(-1,shape[-1]), fmt="%6.3f")
                                np.savetxt("devDX.txt", devDX.reshape(-1,shape[-1]), fmt="%6.3f")
                                np.savetxt("difDX.txt", difDX.reshape(-1,shape[-1]), fmt="%6.3f")
def window_search(img,
                  clf,
                  scaler,
                  window_scale=(1, 1),
                  color_space='RGB',
                  x_start_stop=[None, None],
                  y_start_stop=[None, None],
                  spatial_size=(32, 32),
                  hist_bins=32,
                  orient=9,
                  pix_per_cell=8,
                  cell_per_block=2,
                  hog_channel='All',
                  spatial_feat=True,
                  hist_feat=True,
                  hog_feat=True):
    # If x and y start/stop positions are not defined, set them as image dimensions
    if x_start_stop[0] is None:
        x_start_stop[0] = 0
    if x_start_stop[1] is None:
        x_start_stop[1] = img.shape[1]
    if y_start_stop[0] is None:
        y_start_stop[0] = 0
    if y_start_stop[1] is None:
        y_start_stop[1] = img.shape[0]

    # Remove unnecessary parts of the image
    cropped_img = img[y_start_stop[0]:y_start_stop[1],
                      x_start_stop[0]:x_start_stop[1]]

    # Put the image in the right colorspace
    color_space_img = color_convert(cropped_img, color_space)

    # Training images are 64x64, so we look at 64x64 windows.
    # Different window scales are accomplished by rescaling the image
    resized = cv2.resize(color_space_img,
                         (int(color_space_img.shape[1] / window_scale[1]),
                          int(color_space_img.shape[0] / window_scale[0])))

    # TODO: don't calculate all channels when we don't need to
    # Compute the HOG features for the entire image just once
    hog1 = get_hog_features(resized[:, :, 0],
                            orient,
                            pix_per_cell,
                            cell_per_block,
                            feature_vec=False)
    hog2 = get_hog_features(resized[:, :, 1],
                            orient,
                            pix_per_cell,
                            cell_per_block,
                            feature_vec=False)
    hog3 = get_hog_features(resized[:, :, 2],
                            orient,
                            pix_per_cell,
                            cell_per_block,
                            feature_vec=False)

    blocks_per_window = int(64 / pix_per_cell - cell_per_block + 1)  # How many HOG blocks each window contains

    nx_blocks = hog1.shape[1]
    ny_blocks = hog1.shape[0]

    nx_windows = nx_blocks - blocks_per_window + 1
    ny_windows = ny_blocks - blocks_per_window + 1

    windows = []

    for xs, ys in np.ndindex((nx_windows, ny_windows)):
        if hog_channel == 1:
            hog_features = hog1[ys:ys + blocks_per_window,
                                xs:xs + blocks_per_window, :, :, :].ravel()
        elif hog_channel == 2:
            hog_features = hog2[ys:ys + blocks_per_window,
                                xs:xs + blocks_per_window, :, :, :].ravel()
        elif hog_channel == 3:
            hog_features = hog3[ys:ys + blocks_per_window,
                                xs:xs + blocks_per_window, :, :, :].ravel()
        else:  # All channels
            hog_features = []
            hog_features.extend(hog1[ys:ys + blocks_per_window, xs:xs +
                                     blocks_per_window, :, :, :].ravel())
            hog_features.extend(hog2[ys:ys + blocks_per_window, xs:xs +
                                     blocks_per_window, :, :, :].ravel())
            hog_features.extend(hog3[ys:ys + blocks_per_window, xs:xs +
                                     blocks_per_window, :, :, :].ravel())

        subimg = resized[pix_per_cell * ys:pix_per_cell * ys + 64,
                         pix_per_cell * xs:pix_per_cell * xs + 64, :]

        # Get color features from the current window
        spatial_features = cv2.resize(subimg, spatial_size).ravel()
        hist_features = color_hist(subimg, nbins=hist_bins)

        # Scale features and make a prediction
        test_features = scaler.transform(
            np.hstack((spatial_features, hist_features,
                       hog_features)).reshape(1, -1))

        test_prediction = clf.predict(test_features)

        window_left = int(
            (pix_per_cell * xs * window_scale[0] + x_start_stop[0]))
        window_top = int(
            (pix_per_cell * ys * window_scale[1] + y_start_stop[0]))
        window_right = int(
            ((pix_per_cell * xs + 64) * window_scale[0] + x_start_stop[0]))
        window_bottom = int(
            ((pix_per_cell * ys + 64) * window_scale[1] + y_start_stop[0]))

        if test_prediction:
            windows.append(
                ((window_left, window_top), (window_right, window_bottom)))

    return windows
Example #56
                        [
                            -1, 1, 1, 1, 0, 0, -1, 1, -1, -1, -1, -1, 2, 2, 2,
                            4, 2, 2, -1, -1
                        ],
                        [
                            -1, 1, 0, 0, 0, -1, -1, 1, -1, -1, -1, -1, -1, -1,
                            -1, 0, -1, -1, -1, -1
                        ],
                        [
                            -1, 1, 0, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1,
                            -1, -1, 0, -1, -1, -1, -1
                        ]]

                floormap = np.array(grid).transpose(1, 0)

                for i, j in np.ndindex(h, w):
                    relativeLoc = tuple(
                        projectiveTransform.inverse(np.array([[j, i]]))[0])
                    nearestBorder = (200 * round(relativeLoc[0] / 200),
                                     200 * round(relativeLoc[1] / 200))
                    if abs(relativeLoc[0] - nearestBorder[0]) <= 5 or abs(
                            relativeLoc[1] - nearestBorder[1]) <= 5:
                        gridLoc = (5 + int(relativeLoc[0] // 200),
                                   11 + int(relativeLoc[1] // 200))
                        if floormap[gridLoc] != -1:
                            cellBorders.append((i, j))
                # print(len(cellBorders))

            for cellBorder in cellBorders:
                frame[cellBorder] = (0, 255, 255)
Example #57
import matplotlib.pyplot as plt
import numpy as np

voxels = np.ones((2, 2, 2), dtype=int)
colors = np.zeros((2, 2, 2, 3), dtype=float)
# colors[0, 0, 0] = 1

image = np.array([[[255, 0, 0], [0, 255, 0]], [[0, 0, 255], [0, 255, 0]],
                  [[0, 0, 255], [0, 255, 0]]])

# noinspection DuplicatedCode
for i in np.ndindex(image.shape[:2]):
    for k in range(3):
        # transforms

        index = i[0], i[1], k
        # skip indices that fall outside the voxel grid
        if not all(x < y for x, y in zip(index, voxels.shape)):
            continue

        if voxels[index] == 1:
            colors[index] = image[i]
            break
colors /= 255

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
Example #58
File: base.py Project: ApexBurger/DefDAP
    def grainMapDataCoarse(self,
                           mapData=None,
                           grainData=None,
                           kernelSize=2,
                           bg=np.nan):
        """
        Create a coarsened data map of this grain only from the given map
        data. Data is coarsened using a kernel at each pixel in the
        grain using only data in this grain.

        Parameters
        ----------
        mapData : numpy.ndarray
            Array of map data. This must be cropped! Either this or
            'grainData' must be supplied and 'grainData' takes precedence.
        grainData : numpy.ndarray
            List of data at each point in the grain. Either this or
            'mapData' must be supplied and 'grainData' takes precedence.
        kernelSize : int, optional
            Size of the kernel, given as the number of pixels to dilate by,
            i.e. 1 gives a 3x3 kernel.
        bg : various, optional
            Value to fill the background with. Must be same dtype as
            input array.

        Returns
        -------
        numpy.ndarray
            Map of this grain's coarsened data.

        """
        grainMapData = self.grainMapData(mapData=mapData, grainData=grainData)
        grainMapDataCoarse = np.full_like(grainMapData, np.nan)

        for i, j in np.ndindex(grainMapData.shape):
            if np.isnan(grainMapData[i, j]):
                grainMapDataCoarse[i, j] = bg
            else:
                coarseValue = 0

                yLow = max(i - kernelSize, 0)
                yHigh = min(i + kernelSize + 1, grainMapData.shape[0])
                xLow = max(j - kernelSize, 0)
                xHigh = min(j + kernelSize + 1, grainMapData.shape[1])

                numPoints = 0
                for k in range(yLow, yHigh):
                    for l in range(xLow, xHigh):
                        if not np.isnan(grainMapData[k, l]):
                            coarseValue += grainMapData[k, l]
                            numPoints += 1

                if numPoints > 0:
                    grainMapDataCoarse[i, j] = coarseValue / numPoints
                else:
                    grainMapDataCoarse[i, j] = np.nan

        return grainMapDataCoarse
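A self-contained NumPy sketch of the same NaN-aware kernel averaging driven by np.ndindex; the array values and kernel size are made up for illustration:

import numpy as np

data = np.array([[1., 2., np.nan],
                 [4., np.nan, 6.],
                 [7., 8., 9.]])
kernel_size = 1
coarse = np.full_like(data, np.nan)

for i, j in np.ndindex(data.shape):
    if np.isnan(data[i, j]):
        continue
    y0, y1 = max(i - kernel_size, 0), min(i + kernel_size + 1, data.shape[0])
    x0, x1 = max(j - kernel_size, 0), min(j + kernel_size + 1, data.shape[1])
    coarse[i, j] = np.nanmean(data[y0:y1, x0:x1])  # mean over finite neighbours

print(coarse)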
Example #59
0
def annotate(model, predict_volume, patient_img, patient_mask):
    """
    Return a DataFrame including the position, diameter and chance of abnormal
    tissue being a nodule, based on a given model and volumetric data.

    Args:
        model: 3D ConvNet that should be used to predict a nodule and its malignancy.
        predict_volume: volume whose shape defines how many strides are taken in z, y and x.
        patient_img: the patient's image volume from which cubes are cropped.
        patient_mask: mask volume of the same shape, used to skip cubes with too little masked tissue.

    Returns:
        pandas.DataFrame containing anno_index, coord_x, coord_y, coord_z, diameter, nodule_chance, diameter_mm
        of each found nodule.
    """

    num_done = 0
    num_skipped = 0
    annotation_index = 0

    batch_list = []
    batch_list_coords = []
    patient_predictions_csv = []

    logging.info("Predicted Volume Shape: {}".format(predict_volume.shape))

    for z, y, x in np.ndindex(predict_volume.shape[:3]):
        # if cube_img is None:
        cube_img = patient_img[z * STEP:z * STEP + CROP_SIZE,
                               y * STEP:y * STEP + CROP_SIZE,
                               x * STEP:x * STEP + CROP_SIZE]
        cube_mask = patient_mask[z * STEP:z * STEP + CROP_SIZE,
                                 y * STEP:y * STEP + CROP_SIZE,
                                 x * STEP:x * STEP + CROP_SIZE]

        num_done += 1
        if num_done % 10000 == 0:
            logging.info("Done: ", num_done, " skipped:", num_skipped)

        if cube_mask.sum() < 2000:
            num_skipped += 1
            continue

        if CROP_SIZE != CUBE_SIZE:
            cube_img = rescale_patient_images(
                cube_img, (CUBE_SIZE, CUBE_SIZE, CUBE_SIZE))

        # if you want to consider CROP_SIZE != CUBE_SIZE, see PR #147 for rescale_patient_images2 which
        # rescales input images to support this case
        batch_list_coords.append((z, y, x))
        img_prep = prepare_image_for_net3D(cube_img)
        batch_list.append(img_prep)
        if len(batch_list) % BATCH_SIZE == 0:
            batch_data = np.vstack(batch_list)
            p = model.predict(batch_data, batch_size=BATCH_SIZE)
            ppc, annotation_index = stats_from_batch(p, patient_img.shape,
                                                     predict_volume,
                                                     batch_list_coords,
                                                     annotation_index)
            patient_predictions_csv.extend(ppc)
            batch_list[:] = []
            batch_list_coords[:] = []

    return patient_predictions_csv
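A minimal sketch of the cube-stride iteration used above, with hypothetical STEP and CROP_SIZE values (the real constants come from the project configuration):

import numpy as np

STEP, CROP_SIZE = 12, 32            # assumed values for illustration
volume = np.zeros((64, 64, 64))     # stand-in for patient_img

steps = tuple((s - CROP_SIZE) // STEP + 1 for s in volume.shape)
for z, y, x in np.ndindex(steps):
    cube = volume[z * STEP:z * STEP + CROP_SIZE,
                  y * STEP:y * STEP + CROP_SIZE,
                  x * STEP:x * STEP + CROP_SIZE]
    assert cube.shape == (CROP_SIZE, CROP_SIZE, CROP_SIZE)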
Example #60
0
def plot_scatter(
    dataXd,
    mask=None,
    masked_opacity=0.,
    labels=None,
    colors=True,
    dimcolor=1,
    title=None,
    limits='auto',
    thresholds=None,
    hint_opacity=0.9,
    x_jitter=None,
    y_jitter=None,
    fig=None,
    ax_scatter=None,
    ax_hist_x=None,
    ax_hist_y=None,
    bp_location='scatter',
    xlim=None,
    ylim=None,
    rasterized=None,
    uniq=False,
    include_stats=False,
):
    """
    Parameters
    ----------
    dataXd: array
      The volumetric (or not) data to plot where first dimension
      should only have 2 items
    mask: array, optional
      Additional mask specifying which values should not be considered for
      plotting.  By default values with 0s in both dimensions are not plotted.
    masked_opacity: float, optional
      By default masked out values are not plotted at all.  Value in
      (0,1] will make them visible with this specified opacity
    labels: list of str, optional
      Labels to place for x and y axes
    colors: bool or string or colormap, optional
      Whether to use colors to associate points with their physical location,
      and which colormap to use (jet by default if colors=True)
    dimcolor: int
      If `colors`, then which dimension (within given 3D volume) to
      "track"
    limits: 'auto', 'same', 'per-axis' or (min, max)
      Limits for the axes: with 'auto', 'same' is chosen if the data ranges
      overlap by more than 50% of the union range, otherwise 'per-axis'.
      With 'same', the same limits are used on both axes as determined by
      the data.  If a two-element tuple or list is provided, that range is
      applied to both axes.
    hint_opacity: float, optional
      If `colors` is True, then a "slice" of the volumetric data is plotted
      with the specified opacity to hint at the location of points in the
      original Xd data along the `dimcolor` dimension
    x_jitter: float, optional
      Half-width of uniform noise added to x values.  Might be useful if data
      is quantized so it is valuable to jitter points a bit.
    y_jitter: float, optional
      Half-width of uniform noise added to y values.  Might be useful if data
      is quantized so it is valuable to jitter points a bit
    fig : Figure, optional
      Figure to plot on, otherwise new one created
    ax_*: axes, optional
      Axes for the scatter plot and histograms. If none of them is specified
      (which is the default) then a 'classical' plot is rendered with
      histograms above and to the right
    bp_location: ('scatter', 'hist', None), optional
      Where to place boxplots depicting data range
    xlim: tuple, optional
    ylim: tuple, optional
      To fix plotted range
    rasterized: bool, optional
      Passed to scatter call, to allow rasterization of heavy scatter plots
    uniq: bool, optional
      Plot uniq values (those present in one but not in the other) along
      each axis with crosses
    include_stats: bool, optional
      Whether to report additional statistics on the data. Stats are also
      reported via verbose at level 2
    """
    if len(dataXd) != 2:
        raise ValueError("First axis of dataXd can only have two dimensions, "
                         "got {0}".format(len(dataXd)))
    dataXd = np.asanyarray(
        dataXd
    )  # TODO: allow to operate on list of arrays to not waste RAM/cycles
    data = dataXd.reshape((2, -1))
    if dataXd.ndim < 5:
        ntimepoints = 1
    elif dataXd.ndim == 5:
        ntimepoints = dataXd.shape[-1]
    else:
        raise ValueError("Do not know how to handle data with %d dimensions" %
                         (dataXd.ndim - 1))
    if x_jitter or y_jitter:
        data = data.copy()  # lazy and wasteful

        def jitter_me(x, w):
            x += np.random.uniform(-w, w, size=data.shape[-1])

        if x_jitter:
            jitter_me(data[0, :], x_jitter)
        if y_jitter:
            jitter_me(data[1, :], y_jitter)

    finites = np.isfinite(data)
    nz = np.logical_and(data != 0, finites)
    # TODO : avoid doing data !=0 and just use provided utter mask
    #nz[:, 80000:] = False # for quick testing

    nzsum = np.sum(nz, axis=0)

    intersection = nzsum == 2
    # for coloring we would need to know all the indices
    union = nzsum > 0
    x, y = datainter = data[:, intersection]

    if mask is not None:
        # replicate mask ntimepoints times
        mask = np.repeat(mask.ravel(), ntimepoints)[intersection] != 0
        x_masked = x[mask]
        y_masked = y[mask]

    xnoty = (nz[0].astype(int) - nz[1].astype(int)) > 0
    ynotx = (nz[1].astype(int) - nz[0].astype(int)) > 0

    msg = ''
    if not np.all(finites):
        msg = " non-finite x: %d, y: %d" % (np.sum(~finites[0]),
                                            np.sum(~finites[1]))

    verbose(
        1, "total: %d union: %d%s intersection: %d x_only: %d y_only: %d%s" %
        (len(nzsum), np.sum(union),
         mask is not None and ' masked: %d' % np.sum(mask)
         or '', np.sum(intersection), np.sum(xnoty), np.sum(ynotx), msg))

    if include_stats:
        # report some statistics as well
        import scipy.stats as ss
        r, p = ss.pearsonr(x, y)
        d = np.linalg.norm(x - y)
        statsline = "r=%.2f  p=%.4g  ||x-y||=%.4g" % (r, p, d)
        try:
            from mvpa2.misc.dcov import dcorcoef
            nmax = min(1000, len(x))
            idx = np.random.permutation(np.arange(len(x)))[:nmax]
            dcor = dcorcoef(x[idx], y[idx])
            dcor_s = '' if len(x) == nmax else '[%d random]' % nmax
            statsline += '  dcorr%s=%.4g' % (dcor_s, dcor)
        except ImportError:
            pass
        verbose(2, statsline)
    else:
        statsline = ''

    #fig=pl.figure()
    #pl.plot(datainter[0], datainter[1], '.')
    #fig.show()

    nullfmt = pl.NullFormatter()  # no labels

    # definitions for the axes
    left, width = 0.1, 0.65
    bottom, height = 0.1, 0.65
    bottom_h = left_h = left + width + 0.02

    if not (bool(ax_scatter) or bool(ax_hist_x)
            or bool(ax_hist_y)):  # no custom axes specified
        # our default setup
        rect_scatter = [left, bottom, width, height]
        rect_histx = [left, bottom_h, width, 0.2]
        rect_histy = [left_h, bottom, 0.2, height]

        # start with a rectangular Figure
        if fig is None:
            fig = pl.figure(figsize=(10, 10))

        ax_scatter = pl.axes(rect_scatter)
        ax_hist_x = pl.axes(rect_histx)
        ax_hist_y = pl.axes(rect_histy)

    else:
        # check if all not None?
        # assert(len(axes) == 3)

        if ax_scatter is None:
            raise ValueError(
                "ax_scatter must be provided when custom axes are given")

    ax_bp_x = ax_bp_y = None
    if bp_location is not None:
        ax_bp_x_parent = ax_bp_y_parent = None
        if bp_location == 'scatter':
            # place boxplots into histogram plots
            ax_bp_x_parent = ax_scatter
            ax_bp_y_parent = ax_scatter
        elif bp_location == 'hist':
            ax_bp_x_parent = ax_hist_x
            ax_bp_y_parent = ax_hist_y
        else:
            raise ValueError(
                "bp_location needs to be from (None, 'scatter', 'hist')")

        if ax_bp_x_parent:
            hist_x_pos = ax_bp_x_parent.get_position()
            ax_bp_x = pl.axes([
                hist_x_pos.x0, hist_x_pos.y0 + hist_x_pos.height * 0.9,
                hist_x_pos.width, hist_x_pos.height * 0.1
            ],
                              axisbg='y')

        if ax_bp_y_parent:
            hist_y_pos = ax_bp_y_parent.get_position()
            ax_bp_y = pl.axes([
                hist_y_pos.x0 + hist_y_pos.width * 0.9, hist_y_pos.y0,
                hist_y_pos.width * 0.1, hist_y_pos.height
            ],
                              axisbg='y')

        # ax_bp_y = pl.axes( [left + width * 0.9, bottom, width/10, height], axisbg='y' ) if ax_hist_y else None

    sc_kwargs = dict(facecolors='none', s=1,
                     rasterized=rasterized)  # common kwargs

    # let's use colormap to get non-boring colors
    cm = colors  # e.g. if it is None
    if colors is True:
        cm = pl.matplotlib.cm.get_cmap('jet')
    elif isinstance(colors, str):
        cm = pl.matplotlib.cm.get_cmap(colors)
    if cm and len(dataXd.shape) > dimcolor + 1:
        cm.set_under((1, 1, 1, 0.1))  # transparent what is not in range
        # we need to get our indices back for those we are going to plot.  probably this is the least efficient way:
        ndindices_all = np.array(list(np.ndindex(dataXd.shape[1:])))
        ndindices_nz = ndindices_all[intersection]
        # choose color based on dimcolor
        dimcolor_len = float(dataXd.shape[1 + dimcolor])
        edgecolors = cm(((cm.N - 1) * ndindices_nz[:, dimcolor] /
                         dimcolor_len).astype(int))
        if mask is not None:
            # Plot first those which might be masked out
            if masked_opacity:
                mask_inv = np.logical_not(mask)
                mask_edgecolors = edgecolors[mask_inv].copy()
                # Adjust alpha value
                mask_edgecolors[:, -1] *= masked_opacity
                ax_scatter.scatter(x[mask_inv],
                                   y[mask_inv],
                                   edgecolors=mask_edgecolors,
                                   alpha=masked_opacity,
                                   **sc_kwargs)

            # Plot (on top) those which are not masked-out
            x_plot, y_plot, edgecolors_plot = x[mask], y[mask], edgecolors[
                mask]
        else:
            # Just plot all of them at once
            x_plot, y_plot, edgecolors_plot = x, y, edgecolors

        if len(x_plot):
            ax_scatter.scatter(x_plot,
                               y_plot,
                               edgecolors=edgecolors_plot,
                               **sc_kwargs)

        # for orientation we need to plot 1 slice... assume that the last dimension is z -- figure out a slice with max # of non-zeros
        zdim_entries = ndindices_nz[:, -1]
        zdim_counts, _ = np.histogram(zdim_entries,
                                      bins=np.arange(0,
                                                     np.max(zdim_entries) + 1))
        zdim_max = np.argmax(zdim_counts)

        if hint_opacity:
            # now we need to plot that zdim_max slice taking into account our colormap
            # create new axes
            axslice = pl.axes(
                [left, bottom + height * 0.72, width / 4., height / 5.],
                axisbg='y')
            axslice.axis('off')
            sslice = np.zeros(
                dataXd.shape[1:3])  # XXX hardcoded assumption on dimcolor =1
            sslice[:, :] = np.arange(dimcolor_len)[None, :]
            # if there is time dimension -- choose minimal value across all values
            dataXd_mint = np.min(dataXd,
                                 axis=-1) if dataXd.ndim == 5 else dataXd
            sslice[
                dataXd_mint[0, ..., zdim_max] ==
                0] = -1  # reset those not in the picture to be "under" range
            axslice.imshow(sslice, alpha=hint_opacity, cmap=cm)
    else:
        # the scatter plot without colors to distinguish location
        ax_scatter.scatter(x, y, **sc_kwargs)

    if labels:
        ax_scatter.set_xlabel(labels[0])
        ax_scatter.set_ylabel(labels[1])

    # "unique" points on each of the axes
    if uniq:
        if np.sum(xnoty):
            ax_scatter.scatter(fill_nonfinites(data[0, np.where(xnoty)[0]]),
                               fill_nonfinites(data[1, np.where(xnoty)[0]]),
                               edgecolor='b',
                               **sc_kwargs)
        if np.sum(ynotx):
            ax_scatter.scatter(fill_nonfinites(data[0, np.where(ynotx)[0]]),
                               fill_nonfinites(data[1, np.where(ynotx)[0]]),
                               edgecolor='g',
                               **sc_kwargs)

    # Axes
    ax_scatter.plot((np.min(x), np.max(x)), (0, 0), 'r', alpha=0.5)
    ax_scatter.plot((0, 0), (np.min(y), np.max(y)), 'r', alpha=0.5)

    if (mask is not None and not masked_opacity):
        # if there is a mask which was not intended to be plotted,
        # take those values away while estimating min/max range
        _ = x[mask]
        minx, maxx = np.min(_), np.max(_)
        _ = y[mask]
        miny, maxy = np.min(_), np.max(_)
        del _  # no need to consume RAM
        # print "Here y range", miny, maxy
    else:
        minx, maxx = np.min(x), np.max(x)
        miny, maxy = np.min(y), np.max(y)

    # Process 'limits' option
    if isinstance(limits, str):
        limits = limits.lower()
        if limits == 'auto':
            overlap = min(maxx, maxy) - max(minx, miny)
            range_ = max(maxx, maxy) - min(minx, miny)
            limits = {
                True: 'same',
                False: 'per-axis'
            }[not range_ or overlap / float(range_) > 0.5]

        if limits == 'per-axis':
            same_range = False
            if xlim is None:
                # add some white border
                dx = (maxx - minx) / 20.
                xlim = (minx - dx, maxx + dx)
            if ylim is None:
                dy = (maxy - miny) / 20.
                ylim = (miny - dy, maxy + dy)

        elif limits == 'same':
            same_range = True
            # assign limits the numerical range
            limits = (np.min([minx, miny]), np.max([maxx, maxy]))
        else:
            raise ValueError("Do not know how to handle same_range=%r" %
                             (limits, ))
    else:
        same_range = True

    # Let's now plot threshold lines if provided
    if thresholds is not None:
        stylekwargs = dict(colors='k', linestyles='dotted')
        if len(thresholds):
            # vlines spans the y range, hlines spans the x range
            ax_scatter.vlines(thresholds[0],
                              ax_scatter.get_ylim()[0] * 0.9,
                              ax_scatter.get_ylim()[1] * 0.9, **stylekwargs)
        if len(thresholds) > 1:
            ax_scatter.hlines(thresholds[1],
                              ax_scatter.get_xlim()[0] * 0.9,
                              ax_scatter.get_xlim()[1] * 0.9, **stylekwargs)

    if same_range:
        # now determine nice limits by hand:
        binwidthx = binwidthy = binwidth = np.max(datainter) / 51.  # 0.25

        minxy, maxxy = limits
        sgn = np.sign(minxy)
        xyrange = maxxy - minxy
        xyamax = np.max([np.max(np.fabs(x)), np.max(np.fabs(y))])
        limn = sgn * (int(sgn * minxy / binwidth) - sgn) * binwidth
        limp = (int(maxxy / binwidth) + 1) * binwidth

        ax_scatter.plot((limn * 0.9, limp * 0.9), (limn * 0.9, limp * 0.9),
                        'y--')
        if xlim is None:
            xlim = (limn, limp)
        if ylim is None:
            ylim = (limn, limp)

        binsx = binsy = bins = np.arange(limn, limp + binwidth, binwidth)
    else:
        binwidthx = (maxx - minx) / 51.
        binwidthy = (maxy - miny) / 51.

        binsx = np.arange(minx, maxx + binwidthx, binwidthx)
        binsy = np.arange(miny, maxy + binwidthy, binwidthy)

    if xlim is not None:
        ax_scatter.set_xlim(xlim)
    if ylim is not None:
        ax_scatter.set_ylim(ylim)

    # get values to plot for histogram and boxplot
    x_hist, y_hist = (x, y) if mask is None else (x_masked, y_masked)

    if ax_hist_x is not None:
        ax_hist_x.xaxis.set_major_formatter(nullfmt)
        histx = ax_hist_x.hist(x_hist, bins=binsx, facecolor='b')
        ax_hist_x.set_xlim(ax_scatter.get_xlim())
        ax_hist_x.vlines(0, 0, 0.9 * np.max(histx[0]), 'r')

    if ax_hist_y is not None:
        ax_hist_y.yaxis.set_major_formatter(nullfmt)
        histy = ax_hist_y.hist(y_hist,
                               bins=binsy,
                               orientation='horizontal',
                               facecolor='g')
        ax_hist_y.set_ylim(ax_scatter.get_ylim())
        ax_hist_y.hlines(0, 0, 0.9 * np.max(histy[0]), 'r')

    rect_scatter = [left, bottom, width, height]

    # Box plots
    if ax_bp_x is not None:
        ax_bp_x.axis('off')
        bpx = ax_bp_x.boxplot(x_hist, vert=0)  #'r', 0)
        ax_bp_x.set_xlim(ax_scatter.get_xlim())

    if ax_bp_y is not None:
        ax_bp_y.axis('off')
        bpy = ax_bp_y.boxplot(y_hist, sym='g+')
        ax_bp_y.set_ylim(ax_scatter.get_ylim())

    if statsline:
        # draw the text based on gca
        y1, y2 = ax_scatter.get_ylim()
        x1, x2 = ax_scatter.get_xlim()
        ax_scatter.text(
            0.5 * (x1 + x2),  # center
            y2 - 0.02 * (y2 - y1),
            statsline,
            verticalalignment="top",
            horizontalalignment="center")

    if title:
        pl.title(title)

    return pl.gcf()
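A self-contained sketch of the np.ndindex index-recovery trick used above for colouring: enumerating every voxel location lets a boolean mask over the flattened data be mapped back to coordinates (the toy array shape is made up):

import numpy as np

vol = np.random.randn(2, 3, 4, 5)            # toy (2, x, y, z) data
flat = vol.reshape((2, -1))
intersection = np.all(flat != 0, axis=0)     # voxels non-zero in both maps

ndindices_all = np.array(list(np.ndindex(vol.shape[1:])))
ndindices_nz = ndindices_all[intersection]   # coordinates of plotted points
print(ndindices_nz.shape)                    # (n_points, 3)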