Example #1
def sparsify(a, p=0.25):
    """
    SPARSIFY  Randomly set matrix elements to zero.
          S = SPARSIFY(A, P) is A with elements randomly set to zero
          (S = S' if A is square and A = A', i.e. symmetry is preserved).
          Each element has probability P of being zeroed.
          Thus on average 100*P percent of the elements of A will be zeroed.
          Default: P = 0.25.

          Note added in porting: by inspection only, it appears that the MATLAB
          version may have a bug where it always returns zeros on the diagonal
          for a symmetric matrix... can anyone confirm?
     """

    if p < 0 or p > 1:
        raise Higham('Second parameter must be between 0 and 1 inclusive.')

    m, n = a.shape

    if (a == a.T).all():
        # Preserve symmetry
        d = np.choose(nrnd.rand(m) > p, (np.zeros(m), np.diag(a)))
        a = np.triu(a, 1) * (nrnd.rand(m, n) > p)
        a = a + a.T
        a = a + np.diag(d)
    else:
        # Unsymmetric case
        a = np.choose(nrnd.rand(m, n) > p, (np.zeros((m, n)), a))

    return a
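A quick sanity check of the behaviour described in the docstring (a minimal sketch, assuming sparsify is defined as above with numpy imported as np and numpy.random as nrnd in its module): symmetry is preserved and roughly 100*P percent of the entries end up zero.

import numpy as np
import numpy.random as nrnd

a = nrnd.rand(200, 200)
a = a + a.T                 # symmetric test matrix
s = sparsify(a, p=0.25)
assert (s == s.T).all()     # symmetry preserved
print((s == 0).mean())      # close to 0.25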
Example #2
def set_in_region(a, b, alpha=1.0, beta=1.0, mask=None, out=None):
    """set `ret = alpha * a + beta * b` where mask is True"""
    alpha = np.asarray(alpha, dtype=a.dtype)
    beta = np.asarray(beta, dtype=a.dtype)
    a_dat = a.data if isinstance(a, viscid.field.Field) else a
    b_dat = b.data if isinstance(b, viscid.field.Field) else b
    b = None

    if _HAS_NUMEXPR:
        vals = ne.evaluate("alpha * a_dat + beta * b_dat")
    else:
        vals = alpha * a_dat + beta * b_dat
    a_dat = b_dat = None

    if out is None:
        out = field.empty_like(a)

    if mask is None:
        out.data[...] = vals
    else:
        if hasattr(mask, "nr_comps") and mask.nr_comps:
            mask = mask.as_centered(a.center).as_layout(a.layout)
        try:
            out.data[...] = np.choose(mask, [out.data, vals])
        except ValueError:
            out.data[...] = np.choose(mask.data.reshape(list(mask.sshape) + [1]),
                                      [out.data, vals])
    return out
Example #3
def hideNoData(inBand,outBand):
	noData = inBand.GetNoDataValue()
	for y in range(inBand.YSize):
		inLine = inBand.ReadAsArray(0,y,inBand.XSize,1,inBand.XSize,1)
		outLine = numpy.choose(numpy.equal(inLine,noData),(inLine,0))
		outLine = numpy.choose(numpy.not_equal(inLine,noData),(outLine,0xFF))
		outBand.WriteArray(outLine,0,y)
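The two chained numpy.choose calls above implement a binary valid-data mask: pixels equal to the nodata value become 0 and all other pixels become 0xFF. A one-line numpy.where equivalent (my sketch, not part of the original script):

import numpy

def mask_line(inLine, noData):
    # 0 where the pixel equals the nodata value, 0xFF elsewhere
    return numpy.where(numpy.equal(inLine, noData), 0, 0xFF)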
Example #4
def rgb_to_hsv(r, g, b):
    maxc = np.maximum(r, np.maximum(g, b))
    minc = np.minimum(r, np.minimum(g, b))

    v = maxc

    minc_eq_maxc = np.equal(minc, maxc)

    # compute the difference, but reset zeros to ones to avoid divide by zeros later.
    ones = np.ones_like(r)
    maxc_minus_minc = np.choose(minc_eq_maxc, (maxc-minc, ones))

    s = (maxc-minc) / np.maximum(ones,maxc)
    rc = (maxc-r) / maxc_minus_minc
    gc = (maxc-g) / maxc_minus_minc
    bc = (maxc-b) / maxc_minus_minc

    maxc_is_r = np.equal(maxc, r)
    maxc_is_g = np.equal(maxc, g)
    maxc_is_b = np.equal(maxc, b)

    h = np.zeros_like(r)
    h = np.choose(maxc_is_b, (h, gc-rc+4.0))
    h = np.choose(maxc_is_g, (h, rc-bc+2.0))
    h = np.choose(maxc_is_r, (h, bc-gc))

    h = np.mod(h/6.0, 1.0)

    return (h, s, v)
Example #5
    def _read_particles(self):
        if not os.path.exists(self.particle_filename): return
        with open(self.particle_filename, 'r') as f:
            lines = f.readlines()
            self.num_stars = int(lines[0].strip().split(' ')[0])
            for num, line in enumerate(lines[1:]):
                particle_position_x = float(line.split(' ')[1])
                particle_position_y = float(line.split(' ')[2])
                particle_position_z = float(line.split(' ')[3])
                coord = [particle_position_x, particle_position_y, particle_position_z]
                # for each particle, determine which grids contain it
                # copied from object_finding_mixin.py
                mask = np.ones(self.num_grids)
                for i in range(len(coord)):
                    np.choose(np.greater(self.grid_left_edge.d[:,i],coord[i]), (mask,0), mask)
                    np.choose(np.greater(self.grid_right_edge.d[:,i],coord[i]), (0,mask), mask)
                ind = np.where(mask == 1)
                selected_grids = self.grids[ind]
                # in orion, particles always live on the finest level.
                # so, we want to assign the particle to the finest of
                # the grids we just found
                if len(selected_grids) != 0:
                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
                    ind = np.where(self.grids == grid)[0][0]
                    self.grid_particle_count[ind] += 1
                    self.grids[ind].NumberOfParticles += 1

                    # store the position in the *.sink file for fast access.
                    try:
                        self.grids[ind]._particle_line_numbers.append(num + 1)
                    except AttributeError:
                        self.grids[ind]._particle_line_numbers = [num + 1]
Example #6
def stitch(record1, record2):
    seq1 = array([record1.seq.tostring()])
    seq2 = array([reverse_complement(record2.seq.tostring())])
    seq1.dtype = '|S1'
    seq2.dtype = '|S1'
    quals1 = array(record1.letter_annotations['phred_quality'])
    quals2 = array(record2.letter_annotations['phred_quality'][::-1])
    
    log10p_consensus_1 = log1p(-power(10, -quals1 / 10.)) / log(10)
    log10p_consensus_2 = log1p(-power(10, -quals2 / 10.)) / log(10)
    log10p_error_1 = -log10(3) - (quals1 / 10.)
    log10p_error_2 = -log10(3) - (quals2 / 10.)
    
    min_overlap = 1
    max_overlap = max(len(record1), len(record2))
    overlaps = {}
    for overlap in range(1, max_overlap):
        s1 = seq1[-overlap:]
        s2 = seq2[:overlap]
        q1 = quals1[-overlap:]
        q2 = quals2[:overlap]
        lpc1 = log10p_consensus_1[-overlap:]
        lpc2 = log10p_consensus_2[:overlap]
        lpe1 = log10p_error_1[-overlap:]
        lpe2 = log10p_error_2[:overlap]
        
        consensus = choose(q1 < q2, [s1, s2])
        score = sum(choose(consensus == s1, [lpe1, lpc1])) + sum(choose(consensus == s2, [lpe2, lpc2])) + len(consensus) * log10(4) * 2    # last term is null hypothesis, p=1/4
        consensus.dtype = '|S%i' % len(consensus)
        overlaps[overlap] = (consensus[0],score)
    
    return overlaps
Example #7
 def __init__(self, template_img_with_alpha):
     # result must be 0 where template is transparent
     # sum of result must be 0
     # scale of result doesn't matter
     # scale and bias of input shouldn't matter
     
     assert template_img_with_alpha.shape[2] == 4
     self._orig = template_img_with_alpha.astype(float)
     
     return
     
     opaque = template_img_with_alpha[:, :, 3] >= 128
     opaque3 = numpy.dstack([opaque]*3)
     
     #opaque_pixels = [pixel[:3].astype(int)
     #    for row in template_img_with_alpha
     #    for pixel in row
     #    if is_opaque(pixel)]
     mean = numpy.sum(numpy.choose(opaque3, [0, template_img_with_alpha[:, :, :3]]), axis=(0, 1))/numpy.sum(opaque)
     #print mean
     
     res = numpy.choose(opaque3, [0, template_img_with_alpha[:, :, :3] - mean])
     
     #res = numpy.array([
     #    [pixel[:3] - mean if is_opaque(pixel) else [0, 0, 0] for pixel in row]
     #for row in template_img_with_alpha])
     
     #cv2.imshow('normalize(res)', normalize(res))
     
     self._template = res # floating point 3-channel image
Example #8
def rgb_to_hsv( r,g,b ):

    maxc = numpy.maximum(r,numpy.maximum(g,b))
    minc = numpy.minimum(r,numpy.minimum(g,b))

    v = maxc

    minc_eq_maxc = numpy.equal(minc,maxc)

    # compute the difference, but reset zeros to ones to avoid divide by zeros later.
    ones = numpy.ones((r.shape[0],r.shape[1]))
    maxc_minus_minc = numpy.choose( minc_eq_maxc, (maxc-minc,ones) )

    s = (maxc-minc) / numpy.maximum(ones,maxc)
    rc = (maxc-r) / maxc_minus_minc
    gc = (maxc-g) / maxc_minus_minc
    bc = (maxc-b) / maxc_minus_minc

    maxc_is_r = numpy.equal(maxc,r)
    maxc_is_g = numpy.equal(maxc,g)
    maxc_is_b = numpy.equal(maxc,b)

    h = numpy.zeros((r.shape[0],r.shape[1]))
    h = numpy.choose( maxc_is_b, (h,4.0+gc-rc) )
    h = numpy.choose( maxc_is_g, (h,2.0+rc-bc) )
    h = numpy.choose( maxc_is_r, (h,bc-gc) )

    h = numpy.mod(h/6.0,1.0)

    hsv = numpy.asarray([h,s,v])
    
    return hsv
Example #9
    def reflectivity(self, Q, L=1):
        """
        Compute the Fresnel reflectivity at the given Q/wavelength.
        """
        # If Q < 0, then we are going from substrate into incident medium.
        # In that case we must negate the change in scattering length density
        # and ignore the absorption.
        drho = self.rho-self.Vrho
        S = 4*pi*choose(Q<0,(-drho,drho)) \
            + 2j*pi/L*choose(Q<0,(self.Vmu,self.mu))
        kz = abs(Q)/2
        f = sqrt(kz**2 - S)  # fresnel coefficient

        # Compute reflectivity amplitude, with adjustment for roughness
        amp = (kz-f)/(kz+f) * exp(-2*self.sigma**2*kz*f)
        # Note: we do not need to check for a divide by zero.
        # Qc^2 = 16 pi rho.  Since rho is non-zero then Qc is non-zero.
        # For mu = 0:
        # * If |Qz| < Qc then f has an imaginary component, so |Qz|+f != 0.
        # * If |Qz| > Qc then |Qz| > 0 and f > 0, so |Qz|+f != 0.
        # * If |Qz| = Qc then |Qz| != 0 and f = 0, so |Qz|+f != 0.
        # For mu != 0:
        # * f has an imaginary component, so |Q|+f != 0.

        R = real(amp*conj(amp))
        return R
Example #10
def _partial_transpose_sparse(rho, mask):
    """
    Implement the partial transpose using the CSR sparse matrix.
    """

    data = sp.lil_matrix((rho.shape[0], rho.shape[1]), dtype=complex)

    for m in range(len(rho.data.indptr) - 1):

        n1 = rho.data.indptr[m]
        n2 = rho.data.indptr[m + 1]

        psi_A = state_index_number(rho.dims[0], m)

        for idx, n in enumerate(rho.data.indices[n1:n2]):

            psi_B = state_index_number(rho.dims[1], n)

            m_pt = state_number_index(
                rho.dims[1], np.choose(mask, [psi_A, psi_B]))
            n_pt = state_number_index(
                rho.dims[0], np.choose(mask, [psi_B, psi_A]))

            data[m_pt, n_pt] = rho.data.data[n1 + idx]

    return Qobj(data.tocsr(), dims=rho.dims)
Example #11
def calc_fritch(data):
    """Calculate the fritch index from tmin data (C)"""
    growing_threshold=5
    
    startdate=np.zeros(data.shape[1:])+999
    enddate=np.zeros(data.shape[1:])
    curwarmdays=np.zeros(data.shape[1:])
    
    for doy in range(data.shape[0]):
        warmdays=np.where(data[doy,...]>growing_threshold)
        colddays=np.where(data[doy,...]<=growing_threshold)
        
        if len(warmdays[0])>0:
            curwarmdays[warmdays]+=1
        if len(colddays[0])>0:
            curwarmdays[colddays]=0
        
        growing=np.where(curwarmdays==5)
        if len(growing[0])>0:
            startdate[growing]=np.choose((doy-5)<startdate[growing],(startdate[growing],doy-5))
            enddate[growing]=np.choose(doy>enddate[growing],(enddate[growing],doy))
        
    growing_season=enddate-startdate
    growing_season[growing_season<0]=0
    return growing_season
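Both choose calls in the loop are elementwise min/max in disguise: np.choose(cond, (a, b)) takes b where cond is True, so the startdate line keeps the smaller of startdate and doy-5 and the enddate line keeps the larger of enddate and doy. A small check (a sketch with made-up values):

import numpy as np

start = np.array([999., 3., 120.])
doy = 130
np.testing.assert_array_equal(
    np.choose((doy - 5) < start, (start, doy - 5)),
    np.minimum(start, doy - 5))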
Example #12
            def quad_eqn(l, m, t, aa, bb, cc):
                """
                solves the following eqns for m and l
                m = (-bb +- sqrt(bb^2 - 4*aa*cc))/(2*aa)
                l = (l-a1 - a3*m)/(a2 + a4*m)
                """
                if len(aa) == 0:
                    return
                k = bb * bb - 4 * aa * cc
                k = np.ma.masked_less(k, 0)

                det = np.ma.sqrt(k)
                m1 = (-bb - det) / (2 * aa)
                l1 = (x[t] - a[0][t] - a[2][t] *
                      m1) / (a[1][t] + a[3][t] * m1)

                m2 = (-bb + det) / (2 * aa)
                l2 = (x[t] - a[0][t] - a[2][t] *
                      m2) / (a[1][t] + a[3][t] * m2)

                t1 = np.logical_or(l1 < 0, l1 > 1)
                t2 = np.logical_or(m1 < 0, m1 > 1)
                t3 = np.logical_or(t1, t2)

                m[t] = np.choose(t3, (m1, m2))
                l[t] = np.choose(t3, (l1, l2))
Example #13
    def _daylength_processor(self, timearray) : 
        """computes daylength 
        
        Performs a simple subtraction of sunset-sunrise time. There are three 
        potential cases:
            * if positive, we have the daylength, stop
            * if negative, we have the negative of nightlength, add one.
            * if zero, the sun is either always up or always down. compute the
              solar altitude to find out.
        """
        # sunset - sunrise
        daylength = timearray[:,2] - timearray[:,1]

        # handle negative case
        daylength = np.choose(daylength<(0*u.min), [daylength, daylength+1*u.sday]) * u.sday
        
        # any "always up" or "always down"?
        no_rise_set = np.abs(timearray[:,2]-timearray[:,1]) < 1*u.min
        
        # hardcode sunrise == transit == sunset
        timearray[no_rise_set,1] = timearray[no_rise_set,0]
        timearray[no_rise_set,2] = timearray[no_rise_set,0]
        if np.any(no_rise_set) : 
            dec, H, alt = self._calc_event_body_params(timearray[no_rise_set,:], self.obs_location[no_rise_set])
            daylength[no_rise_set] = np.choose(alt[:,1]>0*u.deg, [0, 1]) * u.day        
        
        return daylength
Example #14
 def updateDistribution(self, tree, dataMatrix, mislabels, distribution, beta):
   probs = tree.predictProbabilities(dataMatrix.iloc[mislabels[:,0]])
   probsIncorrect = np.choose(mislabels[:,1], probs.T)
   probsCorrect = np.choose(mislabels[:,2], probs.T)
   power = (0.5 * sum(distribution * (1 + probsCorrect - probsIncorrect)))
   distribution = distribution * (np.power(beta,power))
   distribution = helpers.toProbDistribution(distribution)
   return distribution
Example #15
	def setFluxoCalSolo(self):
		self.mask1 = self.ndvi < 0
		#-----#------#
		self.fluxoCalSolo = numpy.choose(self.mask1, (((self.temperaturaSuperficie - 273.15) * (0.0038 + (0.0074 * self.albedoSuperficie))\
							   * (1.0 - (0.98 * numpy.power(self.ndvi,4)))) * self.saldoRadiacao, self.valores.G * self.saldoRadiacao))
		#-----#------#
		self.mask1 = None
		self.fluxoCalSolo = numpy.choose(self.mask, (self.valores.noValue, self.fluxoCalSolo))
Example #16
def update(fig):
    """Fit new pointing model and update plots."""
    # Perform early redraw to improve interactivity of clicks (which typically change state of target dots)
    # Target state: 0 = flagged, 1 = unflagged, 2 = highlighted
    target_state = keep * ((target_index == fig.highlighted_target) + 1)
    # Specify colours of flagged, unflagged and highlighted dots, respectively, as RGBA tuples
    dot_colors = np.choose(target_state, np.atleast_3d(np.vstack([(1,1,1,1), (0,0,1,1), (1,0,0,1)]))).T
    for ax in fig.axes[:7]:
        ax.dots.set_facecolors(dot_colors)
    fig.canvas.draw()

    # Fit new pointing model and update results
    params, sigma_params = new_model.fit(az[keep], el[keep], measured_delta_az[keep], measured_delta_el[keep],
                                         std_delta_az[keep], std_delta_el[keep], enabled_params)
    new.update(new_model)

    # Update rest of figure
    fig.texts[3].set_text("$\chi^2$ = %.1f" % new.chi2)
    fig.texts[4].set_text("all sky rms = %.3f' (robust %.3f')" % (new.sky_rms, new.robust_sky_rms))
    new.metrics(target_index == fig.highlighted_target)
    fig.texts[5].set_text("target sky rms = %.3f' (robust %.3f')" % (new.sky_rms, new.robust_sky_rms))
    new.metrics(keep)
    fig.texts[-1].set_text(unique_targets[fig.highlighted_target])
    # Update model parameter strings
    for p, param in enumerate(display_params):
        fig.texts[2*p + 6].set_text(param_to_str(new_model, param) if enabled_params[param] else '')
        # HACK to convert sigmas to arcminutes, but not for P9 and P12 (which are scale factors)
        # This functionality should really reside inside the PointingModel class
        std_param = rad2deg(sigma_params[param]) * 60. if param not in [8, 11] else sigma_params[param]
        std_param_str = ("%.2f'" % std_param) if param not in [8, 11] else ("%.0e" % std_param)
        fig.texts[2*p + 7].set_text(std_param_str if enabled_params[param] and opts.use_stats else '')
        # Turn parameter string bold if it changed significantly from old value
        if np.abs(params[param] - old_model.values()[param]) > 3.0 * sigma_params[param]:
            fig.texts[2*p + 6].set_weight('bold')
            fig.texts[2*p + 7].set_weight('bold')
        else:
            fig.texts[2*p + 6].set_weight('normal')
            fig.texts[2*p + 7].set_weight('normal')
    daz_az, del_az, daz_el, del_el, quiver, before, after = fig.axes[:7]
    # Update quiver plot
    quiver_scale = 0.1 * fig.quiver_scale_slider.val * np.pi / 6 / deg2rad(old.robust_sky_rms / 60.)
    quiver.quiv.set_segments(quiver_segments(new.residual_az, new.residual_el, quiver_scale))
    quiver.quiv.set_color(np.choose(keep, np.atleast_3d(np.vstack([(0.3,0.3,0.3,0.2), (0.3,0.3,0.3,1)]))).T)
    # Update residual plots
    daz_az.dots.set_offsets(np.c_[rad2deg(az), rad2deg(new.residual_xel) * 60.])
    del_az.dots.set_offsets(np.c_[rad2deg(az), rad2deg(new.residual_el) * 60.])
    daz_el.dots.set_offsets(np.c_[rad2deg(el), rad2deg(new.residual_xel) * 60.])
    del_el.dots.set_offsets(np.c_[rad2deg(el), rad2deg(new.residual_el) * 60.])
    after.dots.set_offsets(np.c_[np.arctan2(new.residual_el, new.residual_xel), new.abs_sky_error])
    resid_lim = 1.2 * max(new.abs_sky_error.max(), old.abs_sky_error.max())
    daz_az.set_ylim(-resid_lim, resid_lim)
    del_az.set_ylim(-resid_lim, resid_lim)
    daz_el.set_ylim(-resid_lim, resid_lim)
    del_el.set_ylim(-resid_lim, resid_lim)
    before.set_ylim(0, resid_lim)
    after.set_ylim(0, resid_lim)
    # Redraw the figure
    fig.canvas.draw()
Example #17
		def stepsize(n, x, dx, s, ds, z, dz, w, dw):
			# Note: choose takes element from second vector if condition true, from first if condition false
			delta_p_lhs = np.choose(dx < 0, [np.repeat(self.big, n), ne.evaluate("-x/dx", local_dict = {"x":x, "dx":dx})])
			delta_p_rhs = np.choose(ds < 0, [np.repeat(self.big, n), ne.evaluate("-s/ds", local_dict = {"s":s, "ds":ds})])
			delta_p = min(self.beta*np.min([delta_p_lhs, delta_p_rhs]), 1)
			delta_d_lhs = np.choose(dz < 0, [np.repeat(self.big, n), ne.evaluate("-z/dz", local_dict = {"z":z, "dz":dz})])
			delta_d_rhs = np.choose(dw < 0, [np.repeat(self.big, n), ne.evaluate("-w/dw", local_dict = {"w":w, "dw":dw})])
			delta_d = min(self.beta*np.min([delta_d_lhs, delta_d_rhs]), 1)
			return delta_p, delta_d
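The comment in stepsize spells out the convention most examples on this page rely on; a two-line check with plain numpy (no numexpr needed) confirms it:

import numpy as np

cond = np.array([False, True, False])
print(np.choose(cond, [np.array([1, 1, 1]), np.array([2, 2, 2])]))
# -> [1 2 1]: the element comes from the second vector where cond is True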
Example #18
	def setIaf(self):
		numpy.seterr(all='ignore')
		self.mask1 = numpy.logical_and(((0.69 - self.savi) / 0.59) > 0, self.mask)
		self.iaf = numpy.choose(self.mask1, (self.valores.noValue, -1 * (numpy.log((0.69 - self.savi) / 0.59) / 0.91)))
		self.mask1 = self.savi <= 0.1
		self.iaf = numpy.choose(self.mask1,(self.iaf, 0.0))
		self.mask1 = self.savi >= 0.687
		self.iaf = numpy.choose(self.mask1,(self.iaf, 6.0))
		numpy.seterr(all='warn')
Example #19
 def heightmap_slice(self, heightmap):
     # For solid blocks, use the lighting data of the cell above.
     this_layer = get_cells_using_heightmap(self._data, heightmap)
     above_layer = get_cells_using_heightmap(self._data, numpy.clip(heightmap + 1, 0, self.height-1))
     is_transparent = tileid_is_transparent[this_layer['blocks']]
     result = this_layer.copy()
     result['skylight'] = numpy.choose(is_transparent, [above_layer['skylight'], this_layer['skylight']])
     result['blocklight'] = numpy.choose(is_transparent, [above_layer['blocklight'], this_layer['blocklight']])
     return Layer(result)
Example #20
def bounded(seq, bounds, index=None, clip=True, nearest=True):
    """bound a sequence by bounds = [min,max]

    For example:
    >>> sequence = [0.123, 1.244, -4.755, 10.731, 6.207]
    >>> 
    >>> bounded(sequence, (0,5))
    array([0.123, 1.244, 0.   , 5.   , 5.   ])
    >>> 
    >>> bounded(sequence, (0,5), index=(0,2,4))
    array([ 0.123,  1.244,  0.   , 10.731,  5.   ])
    >>> 
    >>> bounded(sequence, (0,5), clip=False)
    array([0.123     , 1.244     , 3.46621839, 1.44469038, 4.88937466])
    >>> 
    >>> bounds = [(0,5),(7,10)]
    >>> my.constraints.bounded(sequence, bounds)
    array([ 0.123,  1.244,  0.   , 10.   ,  7.   ])
    >>> my.constraints.bounded(sequence, bounds, nearest=False)
    array([ 0.123,  1.244,  7.   , 10.   ,  5.   ])
    >>> my.constraints.bounded(sequence, bounds, nearest=False, clip=False) 
    array([0.123     , 1.244     , 0.37617154, 8.79013111, 7.40864242])
    >>> my.constraints.bounded(sequence, bounds, clip=False)
    array([0.123     , 1.244     , 2.38186577, 7.41374049, 9.14662911])
    >>> 
"""
    seq = array(seq) #XXX: asarray?
    if bounds is None or not bounds: return seq
    if isinstance(index, int): index = (index,)
    if not hasattr(bounds[0], '__len__'): bounds = (bounds,)
    bounds = asfarray(bounds).T  # is [(min,min,...),(max,max,...)]
    # convert None to -inf or inf
    bounds[0][bounds[0] == None] = -inf
    bounds[1][bounds[1] == None] = inf
    # find indices of the elements that are out of bounds
    at = where(sum((lo <= seq)&(seq <= hi) for (lo,hi) in bounds.T).astype(bool) == False)[-1]
    # find the intersection of out-of-bound and selected indices
    at = at if index is None else intersect1d(at, index)
    if not len(at): return seq
    if clip:
        if nearest: # clip at closest bounds
            seq_at = seq[at]
            seq[at] = _clip(seq_at, *(b[abs(seq_at.reshape(-1,1)-b).argmin(axis=1)] for b in bounds))
        else: # clip in randomly selected interval
            picks = choice(len(bounds.T), size=at.shape)
            seq[at] = _clip(seq[at], bounds[0][picks], bounds[1][picks])
        return seq
    # limit to +/- 1e300 #XXX: better defaults?
    bounds[0][bounds[0] < -1e300] = -1e300
    bounds[1][bounds[1] > 1e300] = 1e300
    if nearest:
        seq_at = seq[at]
        seq[at] = choose(array([abs(seq_at.reshape(-1,1) - b).min(axis=1) for b in bounds.T]).argmin(axis=0), [uniform(0,1, size=at.shape) * (hi - lo) + lo for (lo,hi) in bounds.T])
    else: # randomly choose a value in one of the intervals
        seq[at] = choose(choice(len(bounds.T), size=at.shape), [uniform(0,1, size=at.shape) * (hi - lo) + lo for (lo,hi) in bounds.T])
    return seq
Example #21
 def find_point(self, coord):
     """
     Returns the (objects, indices) of grids containing an (x,y,z) point
     """
     mask = np.ones(self.num_grids)
     for i in range(len(coord)):
         np.choose(np.greater(self.grid_left_edge[:, i], coord[i]), (mask, 0), mask)
         np.choose(np.greater(self.grid_right_edge[:, i], coord[i]), (0, mask), mask)
     ind = np.where(mask == 1)
     return self.grids[ind], ind
Example #22
	def enb_e_e0(self):
		self.ENB = 0.97 + 0.0033 * self.iaf
		self.E0 = 0.95 + 0.01 * self.iaf
		self.mask1 = self.iaf >= 3
		self.ENB = numpy.choose(self.mask1, (self.ENB, 0.98))
		self.E0 = numpy.choose(self.mask1, (self.E0, 0.98))
		self.mask1 = self.ndvi <= 0
		self.ENB = numpy.choose(self.mask1, (self.ENB, 0.99))
		self.E0 = numpy.choose(self.mask1, (self.E0, 0.985))
		self.mask1 = None
Example #23
    def _filter(self, rgba, **kwargs):
        # transform to linear RGB
        rgb = rgba[:, :3]
        if self._profile == 'sRGB':
            rgb = ColorsRGB._s(rgb)
        elif self._profile == 'gamma':
            rgb = rgb ** self._gamma
        # apply blinding transformations
        if self._mode == 'achroma':
            # D65 in sRGB
            z = np.inner(rgba[:, :3], self._achorma_weights)
            rgb = np.tile(z, (3,1)).transpose()
        else:
            xyY = ColorxyYInverse()(rgb)
            lx,ly,lm,lyi = [self._blinder.get(self._mode, 'custom')[i] for i in ('x','y','m','yi')]
            # The confusion line is between the source color and the confusion point
            slope = (xyY[:, 1] - ly) / (xyY[:, 0] - lx)
            # slope, and y-intercept (at x=0)
            yi = xyY[:, 1] - xyY[:, 0] * slope
            # Find the change in the x and y dimensions (no Y change)
            dx = (lyi - yi) / (slope - lm)
            dy = (slope * dx) + yi
            # Find the simulated colors XYZ coords
            zXYZ = np.empty_like(rgb)
            zXYZ[:, 0] = dx * xyY[:, 2] / dy
            zXYZ[:, 1] = xyY[:, 2]
            zXYZ[:, 2] = (1 - (dx + dy)) * xyY[:, 2] / dy
            # Calculate difference between sim color and neutral color
            # find neutral grey using D65 white-point
            ngx = 0.312713 * xyY[:, 2] / 0.329016
            ngz = 0.358271 * xyY[:, 2] / 0.329016
            dXYZ = np.zeros_like(rgb)
            dXYZ[:, 0] = ngx - zXYZ[:, 0]
            dXYZ[:, 2] = ngz - zXYZ[:, 2]
            # find out how much to shift sim color toward neutral to fit in RGB space
            # convert d to linear RGB
            dRGB = ColorXYZ()(dXYZ, clip = False)
            dRGB[np.argwhere(dRGB == 0)] = 1.e-10
            rgb = ColorXYZ()(zXYZ, clip = False)
            _rgb = (np.choose(rgb < 0, (1, 0)) - rgb) / dRGB
            _rgb = np.choose((_rgb > 1) | (_rgb < 0), (_rgb, 0))
            adjust = np.amax(_rgb, axis=1)
            rgb += adjust[:, np.newaxis] * dRGB
        # anomalize
        if self._anomalize:
            rgb[:,:] = (self._achorma_v * rgb + rgba[:, :3]) / self._achorma_n
        # transform back to compressed/non-linear space
        if self._profile == 'sRGB':
            rgb = ColorsRGBInverse._s(rgb[:,:3])
        elif self._profile == 'gamma':
            rgb = rgba[:,:3] ** (1 / self._gamma)

        rgba[:, :3] = rgb
        return rgba
Example #24
    def predict(self, *args):

        # Get Input arguments in given sequence
        crv_idxs = args[0]
        corpus_mtrx_lst = args[1]
        gnr_classes = args[2]

        # Get the part of matrices required for the model prediction phase.
        # ###crossval_Y =  cls_gnr_tgs [crv_idxs, :]

        # Initialize Predicted-Classes-Arrays List
        predicted_Y_per_gnr = list()
        predicted_dist_per_gnr = list()

        for g in gnr_classes.keys():

            # Get the part of matrices or arrays required for the model prediction phase
            crossval_X = corpus_mtrx_lst[self.gnrlst_idx[g]][crv_idxs, 0::]
            # params['features_size']]...
            # EXTREMELY IMPORTANT: corpus_mtrx_lst[X] where X=[<idx1>,<idx2>,...,<idxN>]...
            # ...raises an HDF5 ERROR when using a pytables EArray. For scipy.sparse there...
            # ...is no such problem. Therefore the expression corpus_mtrx_lst[X, :] should...
            # ...always be used.

            # Converting TF vectors to Binary
            # cv_arr_bin = np.where(crossval_X.toarray() > 0, 1, 0)

            # Getting the predictions for each Vector for this genre
            predicted_Y = gnr_classes[g].predict(crossval_X)
            # For a one-class model, +1 or -1 is returned.
            predicted_D = gnr_classes[g].decision_function(crossval_X)
            # For Sparse Matrices it might require crossval_X.toarray()

            # Assigning Genre-Class tag to Predicted_Y(s)
            predicted_Y = np.where(predicted_Y == 1, self.genres_lst.index(g) + 1, 0)

            # Keeping the prediction per genre
            predicted_Y_per_gnr.append(predicted_Y)
            predicted_dist_per_gnr.append(predicted_D.reshape(predicted_D.shape[0]))

        # Converting it to Array before returning
        predicted_Y_per_gnr = np.vstack(predicted_Y_per_gnr)
        predicted_dist_per_gnr = np.vstack(predicted_dist_per_gnr)

        # Finding index of the Max Positive distances from the Ensembles Predicted...
        # ...distance Array/Matrix
        max_dist_idxs = np.argmax(predicted_dist_per_gnr, axis=0)

        # Keeping the Max Positive distance from the Predicted distances Array/Matrix and the...
        # ...respective Predicted Ys
        predicted_scores = np.choose(max_dist_idxs, predicted_dist_per_gnr)
        predicted_Y = np.choose(max_dist_idxs, predicted_Y_per_gnr)

        return (predicted_Y, predicted_scores, predicted_Y_per_gnr, predicted_dist_per_gnr)
Example #25
    def plot_lena_k_mean(self):
        n_clusters = 5
        np.random.seed(0)
        lena       = self._lena
        X          = lena.reshape((-1, 1))  # We need an (n_sample, n_feature) array
        k_means    = cluster.KMeans(n_clusters=n_clusters, n_init=4)
        k_means.fit(X)
        values     = k_means.cluster_centers_.squeeze()
        labels     = k_means.labels_
        
        # create an array from labels and values
        lena_compressed       = np.choose(labels, values)
        lena_compressed.shape = lena.shape
        
        vmin       = lena.min()
        vmax       = lena.max()
        
        # original lena
        plt.figure(1, figsize=(3, 2.2))
        plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
        plt.savefig("lena_original.jpg")
        
        # compressed lena
        plt.figure(2, figsize=(3, 2.2))
        plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
        plt.savefig("lena_k_means.jpg")
        # equal bins lena
        regular_values = np.linspace(0, 256, n_clusters + 1)
        
        regular_labels = np.searchsorted(regular_values, lena) - 1
        regular_values = .5 * (regular_values[1:] + regular_values[:-1])  # mean
        regular_lena = np.choose(regular_labels.ravel(), regular_values)
        regular_lena.shape = lena.shape
        
        plt.figure(3, figsize=(3, 2.2))
        plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
        plt.savefig("lena_regular.jpg")
        
        # histogram
        plt.figure(4, figsize=(3, 2.2))
        plt.clf()
        plt.axes([.01, .01, .98, .98])
        plt.hist(X, bins=256, color='.5', edgecolor='.5')
        plt.yticks(())
        plt.xticks(regular_values)
        values = np.sort(values)
        for center_1, center_2 in zip(values[:-1], values[1:]):
            plt.axvline(.5 * (center_1 + center_2), color='b')
        
        for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
            plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')

        plt.savefig("lena_hist.jpg")
Example #26
    def _get_hue ( self ):
        max_rgb = self.max_rgb
        rc      = (max_rgb - self.red)   / self.minus
        gc      = (max_rgb - self.green) / self.minus
        bc      = (max_rgb - self.blue)  / self.minus
        h       = (choose( max_rgb == self.red, [
                           choose( max_rgb == self.green, [
                                   4.0 + gc - rc,
                                   2.0 + rc - bc ] ),
                           bc - gc ] ) / 6.0) % 1.0

        return choose( self.minus == 0.0, [ h, 0.0 ] )
Example #27
def array3d (surface):
    """pygame.numpyarray.array3d (Surface): return array

    copy pixels into a 3d array

    Copy the pixels from a Surface into a 3D array. The bit depth of the
    surface will control the size of the integer values, and will work
    for any type of pixel format.

    This function will temporarily lock the Surface as pixels are copied
    (see the Surface.lock - lock the Surface memory for pixel access
    method).
    """
    bpp = surface.get_bytesize ()
    array = array2d (surface)

    # Taken from from Alex Holkner's pygame-ctypes package. Thanks a
    # lot.
    if bpp == 1:
        palette = surface.get_palette ()
        # Resolve the correct values using the color palette
        pal_r = numpy.array ([c[0] for c in palette])
        pal_g = numpy.array ([c[1] for c in palette])
        pal_b = numpy.array ([c[2] for c in palette])
        planes = [numpy.choose (array, pal_r),
                  numpy.choose (array, pal_g),
                  numpy.choose (array, pal_b)]
        array = numpy.array (planes, numpy.uint8)
        array = numpy.transpose (array, (1, 2, 0))
        return array
    elif bpp == 2:
        # Taken from SDL_GetRGBA.
        masks = surface.get_masks ()
        shifts = surface.get_shifts ()
        losses = surface.get_losses ()
        vr = (array & masks[0]) >> shifts[0]
        vg = (array & masks[1]) >> shifts[1]
        vb = (array & masks[2]) >> shifts[2]
        planes = [(vr << losses[0]) + (vr >> (8 - (losses[0] << 1))),
                  (vg << losses[1]) + (vg >> (8 - (losses[1] << 1))),
                  (vb << losses[2]) + (vb >> (8 - (losses[2] << 1)))]
        array = numpy.array (planes, numpy.uint8)
        return numpy.transpose (array, (1, 2, 0))
    else:
        masks = surface.get_masks ()
        shifts = surface.get_shifts ()
        losses = surface.get_losses ()
        planes = [((array & masks[0]) >> shifts[0]), # << losses[0], Assume 0
                  ((array & masks[1]) >> shifts[1]), # << losses[1],
                  ((array & masks[2]) >> shifts[2])] # << losses[2]]
        array = numpy.array (planes, numpy.uint8)
        return numpy.transpose (array, (1, 2, 0))
Example #28
 def tryCreateBuilding(topleft, plan):
     bottomright = (topleft[0] + plan.layout.shape[0],
                    topleft[1] + plan.layout.shape[1])
     if max(bottomright) >= height_map.shape[0]:
         return False  #FIXME Restriction shouldn't exist.
     site_height = height_map[topleft[0]:bottomright[0], topleft[1]:bottomright[1]]
     if site_height.max() - site_height.min() > 50:
         return False
     site_terrain = terrain_map[topleft[0]:bottomright[0], topleft[1]:bottomright[1]]
     site_costs = numpy.choose(site_terrain, ABuildCosts).sum()
     max_cost = 5 * plan.layout.shape[0] * plan.layout.shape[1]
     if site_costs > max_cost:
         return False
     site_terrain[:] = numpy.choose(plan.layout, [TerrainType.ROOFD, TerrainType.WALLS])
     return True
Example #29
def convert ( hue, lightness, saturation, m1, m2 ):
    """ Returns one channel of an HSL to RGB conversion.
    """
    hue = hue % 1.0
    dm  = m2 - m1

    return (choose( saturation == 0.0, [
                choose( hue < OneSixth, [
                    choose( hue < 0.5, [
                        choose( hue < TwoThirds, [
                            m1,
                            m1 + (dm * (TwoThirds - hue) * 6.0) ] ),
                        m2 ] ),
                    m1 + (dm * hue * 6.0) ] ),
                lightness ] ) * 255.0).astype( uint8 )
Example #30
def gbernsen(f, se, contrast_threshold, gthresh):
    '''
    thresholded = gbernsen(f, se, contrast_threshold, gthresh)

    Generalised Bernsen local thresholding

    Parameters
    ----------
    f : ndarray
        input image
    se : boolean ndarray
        structuring element to use for "locality"
    contrast_threshold : integer
        contrast threshold
    gthresh : numeric, optional
        global threshold to fall back on in low-contrast regions

    Returns
    -------
    thresholded : binary ndarray

    See Also
    --------
    bernsen : function
        Bernsen thresholding with a circular region
    '''
    from mahotas.convolve import rank_filter
    fmax = rank_filter(f, se, se.sum()-1)
    fmin = rank_filter(f, se, 0)
    fptp = fmax - fmin
    fmean = fmax/2. + fmin/2. # Do not use (fmax + fmin) as that may overflow
    return np.choose(fptp < contrast_threshold, (fmean < gthresh, fmean > f))
Example #31
 def test_mixed(self):
     c = np.array([True, True])
     a = np.array([True, True])
     assert_equal(np.choose(c, (a, 1)), np.array([1, 1]))
Example #32
def BARzero(w_F, w_R, DeltaF):
    """A function that when zeroed is equivalent to the solution of
    the Bennett acceptance ratio.

    from http://journals.aps.org/prl/pdf/10.1103/PhysRevLett.91.140601
    D_F = M + w_F - Delta F
    D_R = M + w_R - Delta F

    we want:
    \sum_{t=1}^{T_F} (1 + exp(D_F))^{-1} = \sum_{t=1}^{T_R} (1 + exp(-D_R))^{-1}
    or equivalently, taking logs,
    ln \sum_{t=1}^{T_F} (1 + exp(D_F))^{-1} - ln \sum_{t=1}^{T_R} (1 + exp(-D_R))^{-1} = 0

    Parameters
    ----------
    w_F : np.ndarray
        w_F[t] is the forward work value from snapshot t.
        t = 0...(T_F-1)  Length T_F is deduced from vector.
    w_R : np.ndarray
        w_R[t] is the reverse work value from snapshot t.
        t = 0...(T_R-1)  Length T_R is deduced from vector.
    DeltaF : float
        Our current guess

    Returns
    -------
    fzero : float
        a variable that is zeroed when DeltaF satisfies BAR.

    Examples
    --------
    Compute free energy difference between two specified samples of work values.

    >>> from pymbar import testsystems
    >>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
    >>> DeltaF = BARzero(w_F, w_R, 0.0)

    """

    np.seterr(over='raise')  # raise exceptions on overflow
    w_F = np.array(w_F, np.float64)
    w_R = np.array(w_R, np.float64)
    DeltaF = float(DeltaF)

    # Recommended stable implementation of BAR.

    # Determine number of forward and reverse work values provided.
    T_F = float(w_F.size)  # number of forward work values
    T_R = float(w_R.size)  # number of reverse work values

    # Compute log ratio of forward and reverse counts.
    M = np.log(T_F / T_R)

    # Compute log numerator. We have to watch out for overflows.  We
    # do this by making sure that 1+exp(x) doesn't overflow, choosing
    # to always exponentiate a negative number.

    # log f(W) = - log [1 + exp((M + W - DeltaF))]
    #          = - log ( exp[+maxarg] [exp[-maxarg] + exp[(M + W - DeltaF) - maxarg]] )
    #          = - maxarg - log(exp[-maxarg] + exp[(M + W - DeltaF) - maxarg])
    # where maxarg = max((M + W - DeltaF), 0)

    exp_arg_F = (M + w_F - DeltaF)
    # use boolean logic to zero out the ones that are less than 0, but not if greater than zero.
    max_arg_F = np.choose(np.less(0.0, exp_arg_F), (0.0, exp_arg_F))
    try:
        log_f_F = -max_arg_F - np.log(
            np.exp(-max_arg_F) + np.exp(exp_arg_F - max_arg_F))
    except:
        # give up; if there's overflow, return zero
        print("The input data results in overflow in BAR")
        return np.nan
    log_numer = logsumexp(log_f_F)

    # Compute log_denominator.
    # log f(R) = - log [1 + exp(-(M + W - DeltaF))]
    #          = - log ( exp[+maxarg] [exp[-maxarg] + exp[(M + W - DeltaF) - maxarg]] )
    #          = - maxarg - log[exp[-maxarg] + (T_F/T_R) exp[(M + W - DeltaF) - maxarg]]
    # where maxarg = max( -(M + W - DeltaF), 0)

    exp_arg_R = -(M - w_R - DeltaF)
    # use boolean logic to zero out the ones that are less than 0, but not if greater than zero.
    max_arg_R = np.choose(np.less(0.0, exp_arg_R), (0.0, exp_arg_R))
    try:
        log_f_R = -max_arg_R - np.log(
            np.exp(-max_arg_R) + np.exp(exp_arg_R - max_arg_R))
    except:
        print("The input data results in overflow in BAR")
        return np.nan
    log_denom = logsumexp(log_f_R)

    # This function must be zeroed to find a root
    fzero = log_numer - log_denom

    np.seterr(
        over='warn'
    )  # return options to standard settings so we don't disturb other functionality.
    return fzero
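The maxarg shift used twice above is the standard overflow-safe way to evaluate log(1 + exp(x)); a standalone illustration of the trick (a sketch, independent of pymbar):

import numpy as np

def log1pexp(x):
    # log(1 + exp(x)) = maxarg + log(exp(-maxarg) + exp(x - maxarg)),
    # with maxarg = max(x, 0), so no exponent is ever large and positive.
    maxarg = np.maximum(x, 0.0)
    return maxarg + np.log(np.exp(-maxarg) + np.exp(x - maxarg))

print(log1pexp(np.array([-1000.0, 0.0, 1000.0])))  # [0., log(2), 1000.] with no overflow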
Example #33
    def initialize(
        self,
        runon=0.0,
        f_bare=0.7,
        soil_ew=0.1,
        intercept_cap_grass=1.0,
        zr_grass=0.3,
        I_B_grass=20.0,
        I_V_grass=24.0,
        pc_grass=0.43,
        fc_grass=0.56,
        sc_grass=0.33,
        wp_grass=0.13,
        hgw_grass=0.1,
        beta_grass=13.8,
        LAI_max_grass=2.0,
        LAIR_max_grass=2.88,
        intercept_cap_shrub=1.5,
        zr_shrub=0.5,
        I_B_shrub=20.0,
        I_V_shrub=40.0,
        pc_shrub=0.43,
        fc_shrub=0.56,
        sc_shrub=0.24,
        wp_shrub=0.13,
        hgw_shrub=0.1,
        beta_shrub=13.8,
        LAI_max_shrub=2.0,
        LAIR_max_shrub=2.0,
        intercept_cap_tree=2.0,
        zr_tree=1.3,
        I_B_tree=20.0,
        I_V_tree=40.0,
        pc_tree=0.43,
        fc_tree=0.56,
        sc_tree=0.22,
        wp_tree=0.15,
        hgw_tree=0.1,
        beta_tree=13.8,
        LAI_max_tree=4.0,
        LAIR_max_tree=4.0,
        intercept_cap_bare=1.0,
        zr_bare=0.15,
        I_B_bare=20.0,
        I_V_bare=20.0,
        pc_bare=0.43,
        fc_bare=0.56,
        sc_bare=0.33,
        wp_bare=0.13,
        hgw_bare=0.1,
        beta_bare=13.8,
        LAI_max_bare=0.01,
        LAIR_max_bare=0.01,
    ):
        # GRASS = 0; SHRUB = 1; TREE = 2; BARE = 3;
        # SHRUBSEEDLING = 4; TREESEEDLING = 5
        """
        Parameters
        ----------
        grid: RasterModelGrid
            A grid.
        runon: float, optional
            Runon from higher elevation (mm).
        f_bare: float, optional
            Fraction to partition PET for bare soil (None).
        soil_ew: float, optional
            Residual Evaporation after wilting (mm/day).
        intercept_cap: float, optional
            Plant Functional Type (PFT) specific full canopy interception
            capacity.
        zr: float, optional
            Root depth (m).
        I_B: float, optional
            Infiltration capacity of bare soil (mm/h).
        I_V: float, optional
            Infiltration capacity of vegetated soil (mm/h).
        pc: float, optional
            Soil porosity (None).
        fc: float, optional
            Soil saturation degree at field capacity (None).
        sc: float, optional
            Soil saturation degree at stomatal closure (None).
        wp: float, optional
            Soil saturation degree at wilting point (None).
        hgw: float, optional
            Soil saturation degree at hygroscopic point (None).
        beta: float, optional
            Deep percolation constant = 2*b+3, where b is the water
            retention parameter (None).
        LAI_max: float, optional
            Maximum leaf area index (m^2/m^2).
        LAIR_max: float, optional
            Reference leaf area index (m^2/m^2).
        """

        self._vegtype = self._grid["cell"]["vegetation__plant_functional_type"]
        self._runon = runon
        self._fbare = f_bare
        self._interception_cap = np.choose(
            self._vegtype,
            [
                intercept_cap_grass,
                intercept_cap_shrub,
                intercept_cap_tree,
                intercept_cap_bare,
                intercept_cap_shrub,
                intercept_cap_tree,
            ],
        )

        self._zr = np.choose(
            self._vegtype,
            [zr_grass, zr_shrub, zr_tree, zr_bare, zr_shrub, zr_tree])

        self._soil_Ib = np.choose(
            self._vegtype,
            [I_B_grass, I_B_shrub, I_B_tree, I_B_bare, I_B_shrub, I_B_tree],
        )

        self._soil_Iv = np.choose(
            self._vegtype,
            [I_V_grass, I_V_shrub, I_V_tree, I_V_bare, I_V_shrub, I_V_tree],
        )

        self._soil_Ew = soil_ew
        self._soil_pc = np.choose(
            self._vegtype,
            [pc_grass, pc_shrub, pc_tree, pc_bare, pc_shrub, pc_tree])

        self._soil_fc = np.choose(
            self._vegtype,
            [fc_grass, fc_shrub, fc_tree, fc_bare, fc_shrub, fc_tree])

        self._soil_sc = np.choose(
            self._vegtype,
            [sc_grass, sc_shrub, sc_tree, sc_bare, sc_shrub, sc_tree])

        self._soil_wp = np.choose(
            self._vegtype,
            [wp_grass, wp_shrub, wp_tree, wp_bare, wp_shrub, wp_tree])

        self._soil_hgw = np.choose(
            self._vegtype,
            [hgw_grass, hgw_shrub, hgw_tree, hgw_bare, hgw_shrub, hgw_tree],
        )

        self._soil_beta = np.choose(
            self._vegtype,
            [
                beta_grass, beta_shrub, beta_tree, beta_bare, beta_shrub,
                beta_tree
            ],
        )

        self._LAI_max = np.choose(
            self._vegtype,
            [
                LAI_max_grass,
                LAI_max_shrub,
                LAI_max_tree,
                LAI_max_bare,
                LAI_max_shrub,
                LAI_max_tree,
            ],
        )

        self._LAIR_max = np.choose(
            self._vegtype,
            [
                LAIR_max_grass,
                LAIR_max_shrub,
                LAIR_max_tree,
                LAIR_max_bare,
                LAIR_max_shrub,
                LAIR_max_tree,
            ],
        )
Example #34
# Assumed setup (the source snippet is truncated): imports, test image, and
# constants are reconstructed here and are not part of the original code.
import numpy
import matplotlib.pyplot
from scipy import misc

lena = misc.ascent()    # stand-in grayscale image (the original used scipy's lena)
SIZE = lena.shape[0]
MAX_COLOR = 255
ITERATIONS = 10         # assumed iteration count
x_min, x_max = -2.5, 1  # assumed view window
y_min, y_max = -1, 1

# Initialize arrays
x, y = numpy.meshgrid(numpy.linspace(x_min, x_max, SIZE),
                      numpy.linspace(y_min, y_max, SIZE))
c = x + 1j * y
z = c.copy()
fractal = numpy.zeros(z.shape, dtype=numpy.uint8) + MAX_COLOR

# Generate fractal
for n in range(ITERATIONS):
    print(n)
    mask = numpy.abs(z) <= 4
    z[mask] = z[mask]**2 + c[mask]
    fractal[(fractal == MAX_COLOR)
            & (~mask)] = (MAX_COLOR - 1) * n / ITERATIONS

# Display the fractal
matplotlib.pyplot.subplot(211)
matplotlib.pyplot.imshow(fractal)
matplotlib.pyplot.title('Mandelbrot')
matplotlib.pyplot.axis('off')

# Combine with lena
matplotlib.pyplot.subplot(212)
matplotlib.pyplot.imshow(numpy.choose(fractal < lena, [fractal, lena]))
matplotlib.pyplot.axis('off')
matplotlib.pyplot.title('Mandelbrot + Lena')

matplotlib.pyplot.show()
Example #35
def test_choose_execution(setup):
    options.chunk_size = 2

    choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23],
               [30, 31, 32, 33]]
    a = choose([2, 3, 1, 0], choices)

    res = a.execute().fetch()
    expected = np.choose([2, 3, 1, 0], choices)

    np.testing.assert_array_equal(res, expected)

    a = choose([2, 4, 1, 0], choices, mode='clip')  # 4 goes to 3 (4-1)
    expected = np.choose([2, 4, 1, 0], choices, mode='clip')

    res = a.execute().fetch()
    np.testing.assert_array_equal(res, expected)

    a = choose([2, 4, 1, 0], choices, mode='wrap')  # 4 goes to (4 mod 4)
    expected = np.choose([2, 4, 1, 0], choices,
                         mode='wrap')  # 4 goes to (4 mod 4)

    res = a.execute().fetch()
    np.testing.assert_array_equal(res, expected)

    a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
    choices = [-10, 10]

    b = choose(a, choices)
    expected = np.choose(a, choices)

    res = b.execute().fetch()
    np.testing.assert_array_equal(res, expected)

    a = np.array([0, 1]).reshape((2, 1, 1))
    c1 = np.array([1, 2, 3]).reshape((1, 3, 1))
    c2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5))

    b = choose(a, (c1, c2))
    expected = np.choose(a, (c1, c2))

    res = b.execute().fetch()
    np.testing.assert_array_equal(res, expected)

    # test order
    a = np.array([0, 1]).reshape((2, 1, 1), order='F')
    c1 = np.array([1, 2, 3]).reshape((1, 3, 1), order='F')
    c2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5), order='F')

    b = choose(a, (c1, c2))
    expected = np.choose(a, (c1, c2))

    res = b.execute().fetch()
    np.testing.assert_array_equal(res, expected)
    assert res.flags['C_CONTIGUOUS'] == expected.flags['C_CONTIGUOUS']
    assert res.flags['F_CONTIGUOUS'] == expected.flags['F_CONTIGUOUS']

    b = choose(a, (c1, c2), out=tensor(np.empty(res.shape, order='F')))
    expected = np.choose(a, (c1, c2), out=np.empty(res.shape, order='F'))

    res = b.execute().fetch()
    np.testing.assert_array_equal(res, expected)
    assert res.flags['C_CONTIGUOUS'] == expected.flags['C_CONTIGUOUS']
    assert res.flags['F_CONTIGUOUS'] == expected.flags['F_CONTIGUOUS']
Example #36
def ThomasFermi2D(xy, *p):
    """p = [offset, amplitude, size_x, size_y, center_x, center_y]"""
    (x, y) = xy
    TF_profile = TFParab(x, y, p[2], p[3], p[4], p[5])
    return np.choose(TF_profile > 0,
                     [p[0], p[0] + p[1] * np.abs(TF_profile)**(3 / 2)])
Example #37
            s=50)
plt.title('K-Means Classification')
plt.show()

# In[]:
#Create dataframe for original data comparison with model prediction
iris_df = pd.DataFrame(iris.data)
iris_df.columns = [
    'Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width'
]
y.columns = ['Targets']

#Set a variable color_theme to color our data points by their species label.
color_theme = np.array(['darkgray', 'lightsalmon', 'powderblue'])

relabel = np.choose(clustering.labels_, [2, 0, 1]).astype(np.int64)
plt.subplot(1, 2, 1)
plt.scatter(x=iris_df.Petal_Length,
            y=iris_df.Petal_Width,
            c=color_theme[iris.target],
            s=50)
plt.title('Original Dataset Labels')

#Change the color_theme as per the new object 'relabel'.
plt.subplot(1, 2, 2)
plt.scatter(x=iris_df.Petal_Length,
            y=iris_df.Petal_Width,
            c=color_theme[relabel],
            s=50)
plt.title('K-Means Classification')
plt.show()
Example #38
def logpow(v, p):
    return np.choose(v == 0, [p * np.log(v), 0])
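One caveat: np.choose picks elements only after both candidate arrays are fully evaluated, so p * np.log(v) is still computed at v == 0 and emits a divide-by-zero warning even though the 0 branch wins there. A hedged variant that silences it (my sketch, not the original):

import numpy as np

def logpow_quiet(v, p):
    # the log(0) branch is discarded at v == 0, so the warning is spurious
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.choose(v == 0, [p * np.log(v), 0])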
Example #39
 def __call__(self, w):
     sigma = np.choose(w > self.wp, [0.07, 0.09])
     a = np.exp(-(w / self.wp - 1)**2 / (2 * sigma**2))
     S = ((self.alpha / w**5) *
          np.exp(-(1 / pi) / (w / self.wp / self.kb)**4) * self.gamma**a)
     return S
Example #40
def gather_elements(data, indices, axis=0):  # type: ignore
    data_swaped = np.swapaxes(data, 0, axis)
    index_swaped = np.swapaxes(indices, 0, axis)
    gathered = np.choose(index_swaped, data_swaped)
    y = np.swapaxes(gathered, 0, axis)
    return y
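On NumPy >= 1.15 the same gather can be written with np.take_along_axis, which selects along the given axis directly. A small equivalence check (a sketch with made-up inputs, assuming gather_elements as defined above):

import numpy as np

data = np.array([[1, 2], [3, 4]])
indices = np.array([[0, 0], [1, 0]])

print(gather_elements(data, indices, axis=0))     # [[1 2] [4 2]]
print(np.take_along_axis(data, indices, axis=0))  # same result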
Example #41
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster

try:
    face = sp.face(gray=True)
except AttributeError:
    from scipy import misc
    face = misc.face(gray=True)

plt.gray()
plt.imshow(face)
plt.show()  # show the original image

X = face.reshape((-1, 1))
k_means = cluster.KMeans(n_clusters=5, n_init=1)  # build the classifier; n_clusters is the K value
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
face_compressed = np.choose(labels, values)  # pick from values according to each pixel's label
face_compressed.shape = face.shape

plt.gray()
plt.imshow(face_compressed)
plt.show()  # show the image after quantization

len(face_compressed)

face_compressed.shape
X.shape
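Here np.choose(labels, values) acts as a lookup table: each pixel's cluster label indexes into the array of cluster centers, which is the same as the fancy-indexing form values[labels]. The indexing form also sidesteps np.choose's limit of roughly 32 choice arrays, which matters for larger codebooks. A quick check (a sketch):

import numpy as np

values = np.array([10., 20., 30., 40., 50.])   # e.g. 5 cluster centers
labels = np.array([0, 4, 2, 2, 1])             # one label per pixel
np.testing.assert_array_equal(np.choose(labels, values), values[labels])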
Example #42
    Usage()
if inNoData is None:
    Usage()
if outNoData is None:
    Usage()

indataset = gdal.Open(infile, GA_ReadOnly)

out_driver = gdal.GetDriverByName(format)
outdataset = out_driver.Create(outfile, indataset.RasterXSize,
                               indataset.RasterYSize, indataset.RasterCount,
                               type)

gt = indataset.GetGeoTransform()
if gt is not None and gt != (0.0, 1.0, 0.0, 0.0, 0.0, 1.0):
    outdataset.SetGeoTransform(gt)

prj = indataset.GetProjectionRef()
if prj is not None and len(prj) > 0:
    outdataset.SetProjection(prj)

for iBand in range(1, indataset.RasterCount + 1):
    inband = indataset.GetRasterBand(iBand)
    outband = outdataset.GetRasterBand(iBand)

    for i in range(inband.YSize - 1, -1, -1):
        scanline = inband.ReadAsArray(0, i, inband.XSize, 1, inband.XSize, 1)
        scanline = numpy.choose(numpy.equal(scanline, inNoData),
                                (scanline, outNoData))
        outband.WriteArray(scanline, 0, i)
Example #43
# Plot the Original Classifications
plt.subplot(1, 2, 1)
plt.scatter(x.Petal_Length, x.Petal_Width, c=colormap[y.Targets], s=40)
plt.title('Classification from labels')

# Plot the Models Classifications
plt.subplot(1, 2, 2)
plt.scatter(x.Petal_Length, x.Petal_Width, c=colormap[model.labels_], s=40)
plt.title('K Mean Classification')

plt.show()

#So according to the tutorial the classifier has mislabeled the data.

# The fix: swap the 0 and 1 labels so the cluster labels align with the target encoding.
predY = np.choose(model.labels_, [1, 0, 2]).astype(np.int64)
print(model.labels_)
print(predY)

# View the results
# Set the size of the plot
plt.figure(figsize=(14, 7))

# Create a colormap
colormap = np.array(['red', 'blue', 'green'])

# Plot Original
plt.subplot(1, 2, 1)
plt.scatter(x.Petal_Length, x.Petal_Width, c=colormap[y.Targets], s=40)
plt.title('Real Classification')
Example #44
 def __call__(self, samples):
     median = np.median(samples[samples > 0])
     return np.choose(samples >= median, (0, samples))
Example #45
 def choose(self, choices, out=None, mode='raise'):
     # Let __array_function__ take care since choices can be masked too.
     return np.choose(self, choices, out=out, mode=mode)
Example #46
 def __call__(self, w):
     sigma = np.choose(w > self.wp, [0.07, 0.09])
     a = np.exp(-(w - self.wp)**2 / (2 * sigma**2 * self.wp**2))
     S = ((self.alpha * g**2 / w**5) * np.exp(-1.25 * (self.wp / w)**4) *
          self.gamma**a)
     return S
Example #47
y[np.array([0,2,4]),1] # if idx_size_mismatch in ix_vec1,ix_vec2 they are "broadcasted"/repeated (ix_vec1 or ix_vec2 is int); "broadcasting" is idx_int->idx_vec (ML trivial)
a=np.random.randn(3,4,2); b=r_[3,7.3]; a*b # a[8 x 1 x 6 x 1],b[7 x 1 x 5]->[8 x 7 x 6 x 5]; breaks on vec1+vec2(ambiguity which first); # yuck,it is true;
x=np.arange(0,50,10); x[np.array([1, 1, 3, 1])] += 1; # note "1" incremented only once
z[1,...,2]=z[1,:,:,2]; # "ellipsis" notation looks unreliable
ix1=(1,slice(3,6)); ix2=(1,ellipsis,2) # =[1,3:6],[1,...,2]; fn(arr,idx) - use tuple for idx
z[list1]=z[list1,:]; z[tuple1]=z[t[0],t[1],...]
## reshape
np.array([2,3,4,5]).reshape(-1,1) # vec_transpose; "-1" means "calculate remaining reshape dim from the other dims"
y[:,np.newaxis,:].shape # ugly reshape, used for ver2arr
n=5; x=r_[:n]; x[:,np.newaxis],x[np.newaxis,:]; x.reshape(n,1);
a.shape=(a.shape[1],a.shape[0]); # new array created,original deleted; 
b=20<y; y[b]; b[:,5]; y[b[:,5]] ~y[b[:,np.tile(5,(1,y.shape[1]))]].reshape(xx,y.shape[1]) # 
a[:,0,:].shape # it is 2-dim (unlike ML);
a[:] ~ a.copy();
## condition,sort etc
a[1<a]; z=np.nonzero(1<a) '(ar1_x,ar2_y), ~ML [i,j,v]=find(a)'; z=np.where(1<a); np.choose(); a.max(),a.max(0),a.max(1),maximum(a,b),unique(a),np.squeeze(a);
D=B.copy();  # assign matrices by copy; find() may require to convert to vect and back (py pointer helps);
a.ravel().argsort(); a[a[:,0].argsort(),]; a.argsort(axis=1);  # ravel()~flatten,argsort~sort,return idx;

## structured arrays~array_of_tuples;
x=np.zeros((2,),dtype=('i4,f4,a10')); x[:]=[(1,2.,'Hello'),(2,3.,"World")]; x['f1'] # array of tuples,fast_collect
dtype='3int8, float32, (2,3)float64'; # ar_type str/tuple(special extra_info)/list(name,dtype)/dict(special)
# np.ndarray() - access to mem; ndarray can be subclassed

#--- numpy math mix
polyfit(); (a,b)=polyfit(x,y,1); linalg.lstsq(x,y); poly(); roots(); fft(a),ifft(a),convolve(x,y); eval('e=4');
linalg.solve(a,b); v=a.compress((a!=0).flat); v=extract(a!=0,a);

## genfromtxt,loadtxt, # .gz,.bz2
from StringIO import StringIO;
# 'skip_header d=0','skip_footer d=0' kills lines; 'comments d=None' kills comments till eol; 'autostrip d=F' kills whsp;
Example #48
    def initialize(self,
                   Blive_init=102.,
                   Bdead_init=450.,
                   ETthreshold_up=3.8,
                   ETthreshold_down=6.8,
                   Tdmax=10.,
                   w=0.55,
                   WUE_grass=0.01,
                   LAI_max_grass=2.,
                   cb_grass=0.0047,
                   cd_grass=0.009,
                   ksg_grass=0.012,
                   kdd_grass=0.013,
                   kws_grass=0.02,
                   WUE_shrub=0.0025,
                   LAI_max_shrub=2.,
                   cb_shrub=0.004,
                   cd_shrub=0.01,
                   ksg_shrub=0.002,
                   kdd_shrub=0.013,
                   kws_shrub=0.02,
                   WUE_tree=0.0045,
                   LAI_max_tree=4.,
                   cb_tree=0.004,
                   cd_tree=0.01,
                   ksg_tree=0.002,
                   kdd_tree=0.013,
                   kws_tree=0.01,
                   WUE_bare=0.01,
                   LAI_max_bare=0.01,
                   cb_bare=0.0047,
                   cd_bare=0.009,
                   ksg_bare=0.012,
                   kdd_bare=0.013,
                   kws_bare=0.02,
                   **kwds):
        # GRASS = 0; SHRUB = 1; TREE = 2; BARE = 3;
        # SHRUBSEEDLING = 4; TREESEEDLING = 5
        """
        Parameters
        ----------
        grid: RasterModelGrid
            A grid.
        Blive_init: float, optional
            Initial value for vegetation__live_biomass. Converted to field.
        Bdead_init: float, optional
            Initial value for vegetation__dead_biomass. Coverted to field.
        ETthreshold_up: float, optional
            Potential Evapotranspiration (PET) threshold for
            growing season (mm/d).
        ETthreshold_down: float, optional
            PET threshold for dormant season (mm/d).
        Tdmax: float, optional
            Constant for dead biomass loss adjustment (mm/d).
        w: float, optional
            Conversion factor of CO2 to dry biomass (Kg DM/Kg CO2).
        WUE: float, optional
            Water Use Efficiency - ratio of CO2 assimilated to water lost
            by the plant through transpiration (KgCO2 Kg-1 H2O).
        LAI_max: float, optional
            Maximum leaf area index (m2/m2).
        cb: float, optional
            Specific leaf area for green/live biomass (m2 leaf g-1 DM).
        cd: float, optional
            Specific leaf area for dead biomass (m2 leaf g-1 DM).
        ksg: float, optional
            Senescence coefficient of green/live biomass (d-1).
        kdd: float, optional
            Decay coefficient of aboveground dead biomass (d-1).
        kws: float, optional
            Maximum drought induced foliage loss rate (d-1).
        """
        self._vegtype = self.grid["cell"]["vegetation__plant_functional_type"]
        self._WUE = np.choose(
            self._vegtype,
            [WUE_grass, WUE_shrub, WUE_tree, WUE_bare, WUE_shrub, WUE_tree],
        )
        # Water Use Efficiency  KgCO2kg-1H2O
        self._LAI_max = np.choose(
            self._vegtype,
            [
                LAI_max_grass,
                LAI_max_shrub,
                LAI_max_tree,
                LAI_max_bare,
                LAI_max_shrub,
                LAI_max_tree,
            ],
        )
        # Maximum leaf area index (m2/m2)
        self._cb = np.choose(
            self._vegtype,
            [cb_grass, cb_shrub, cb_tree, cb_bare, cb_shrub, cb_tree])
        # Specific leaf area for green/live biomass (m2 leaf g-1 DM)
        self._cd = np.choose(
            self._vegtype,
            [cd_grass, cd_shrub, cd_tree, cd_bare, cd_shrub, cd_tree])
        # Specific leaf area for dead biomass (m2 leaf g-1 DM)
        self._ksg = np.choose(
            self._vegtype,
            [ksg_grass, ksg_shrub, ksg_tree, ksg_bare, ksg_shrub, ksg_tree],
        )
        # Senescence coefficient of green/live biomass (d-1)
        self._kdd = np.choose(
            self._vegtype,
            [kdd_grass, kdd_shrub, kdd_tree, kdd_bare, kdd_shrub, kdd_tree],
        )
        # Decay coefficient of aboveground dead biomass (d-1)
        self._kws = np.choose(
            self._vegtype,
            [kws_grass, kws_shrub, kws_tree, kws_bare, kws_shrub, kws_tree],
        )
        # Maximum drought induced foliage loss rates (d-1)
        self._Blive_init = Blive_init
        self._Bdead_init = Bdead_init
        self._ETthresholdup = ETthreshold_up  # Growth threshold (mm/d)
        self._ETthresholddown = ETthreshold_down  # Dormancy threshold (mm/d)
        self._Tdmax = Tdmax  # Constant for dead biomass loss adjustment
        self._w = w  # Conversion factor of CO2 to dry biomass

        self._Blive_ini = self._Blive_init * np.ones(self.grid.number_of_cells)
        self._Bdead_ini = self._Bdead_init * np.ones(self.grid.number_of_cells)
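# The np.choose calls above map each cell's integer functional-type code to a
# per-type parameter in one vectorized lookup; a standalone sketch using the
# default WUE values from the signature above:
import numpy as np
# 0=grass, 1=shrub, 2=tree, 3=bare, 4=shrub seedling, 5=tree seedling
vegtype = np.array([0, 3, 2, 1, 5, 4])
# seedlings (codes 4, 5) reuse the adult shrub/tree values
WUE = np.choose(vegtype, [0.01, 0.0025, 0.0045, 0.01, 0.0025, 0.0045])
# WUE[i] is now the water-use efficiency for cell i's functional type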
Exemple #49
0
    def process_scale(a_lods, lod):
        d = a_lods[lod] - cv2.pyrUp(a_lods[lod+1])
        for i in range(lod):
            d = cv2.pyrUp(d)
        v = cv2.GaussianBlur(d*d, (3, 3), 0)
        return np.sign(d), v

    scale_num = 6
    for frame_i in count():
        a_lods = [a]
        for i in range(scale_num):
            a_lods.append(cv2.pyrDown(a_lods[-1]))
        ms, vs = [], []
        for i in range(1, scale_num):
            m, v = process_scale(a_lods, i)
            ms.append(m)
            vs.append(v)
        mi = np.argmin(vs, 0)
        a += np.choose(mi, ms) * 0.025
        a = (a-a.min()) / a.ptp()

        if out:
            out.write(a)
        vis = a.copy()
        draw_str(vis, (20, 20), 'frame %d' % frame_i)
        cv2.imshow('a', vis)
        if 0xFF & cv2.waitKey(5) == 27:
            break
    cv2.destroyAllWindows()
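# np.choose(mi, ms) above picks, per pixel, the sign from whichever scale has
# the lowest local variance; a standalone sketch of the argmin-then-choose idiom:
import numpy as np
ms = [np.full((2, 2), v) for v in (-1.0, 1.0, -1.0)]  # candidate values per scale
vs = [np.array([[3., 1.], [2., 5.]]),                 # variance per scale
      np.array([[1., 2.], [4., 0.]]),
      np.array([[2., 3.], [1., 1.]])]
mi = np.argmin(vs, 0)        # per-pixel index of the lowest-variance scale
picked = np.choose(mi, ms)   # per-pixel value taken from that scale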
Exemple #50
0
def _calc_steepest_descent_across_adjacent_cells(grid, node_values, *args,
                                                 **kwds):
    """Get steepest gradient to neighbor and diagonal cells.

    Calculate the steepest downward gradients in *node_values*, given at every
    node in the grid, relative to the nodes centered at *cell_ids*. Note that
    upward gradients are reported as positive, so this method returns negative
    numbers.

    If *cell_ids* is not provided, calculate the maximum gradient for all
    cells in the grid.

    The default is to only consider neighbor cells to the north, south, east,
    and west. To also consider gradients to diagonal nodes, set the *method*
    keyword to *d8* (the default is *d4*).

    Use the *out* keyword if you have an array that you want to put the result
    into. If not given, create a new array.

    Use the *return_node* keyword to also return the node id of the node in the
    direction of the maximum gradient.

    Parameters
    ----------
    grid : RasterModelGrid
        Input grid.
    node_values : array_like
        Values to take gradient of.
    cell_ids : array_like, optional
        IDs of grid cells to measure gradients.
    return_node: boolean, optional
        Return node IDs of the node that has the steepest descent.
    method : {'d4', 'd8'}
        How to calculate the steepest descent.
    out : ndarray, optional
        Alternative output array in which to place the result.  Must
        be of the same shape and buffer length as the expected output.

    Returns
    -------
    ndarray :
        Calculated gradients to lowest adjacent node.

    Examples
    --------
    Create a rectilinear grid that is 3 nodes by 3 nodes and so has one cell
    centered around node 4.

    >>> from landlab import RasterModelGrid
    >>> grid = RasterModelGrid(3, 3)
    >>> values_at_nodes = np.array([-3., -1., 0., 0., 1., 0., 0., 0., 0.])

    Calculate gradients to cell diagonals and choose the gradient to the
    lowest node.

    >>> from math import sqrt
    >>> grid._calc_steepest_descent_across_adjacent_cells(values_at_nodes,
    ...     method='d4')
    masked_array(data = [-2.],
                 mask = False,
           fill_value = 1e+20)
    <BLANKLINE>
    >>> grid._calc_steepest_descent_across_adjacent_cells(values_at_nodes,
    ...     method='d8') * sqrt(2.)
    masked_array(data = [-4.],
                 mask = False,
           fill_value = 1e+20)
    <BLANKLINE>

    With the 'd4' method, the steepest gradient is to the bottom node (id = 1).

    >>> (_, ind) = grid._calc_steepest_descent_across_adjacent_cells(
    ...                values_at_nodes, return_node=True)
    >>> ind
    array([1])

    With the 'd8' method, the steepest gradient is to the lower-left node
    (id = 0).

    >>> (_, ind) = grid._calc_steepest_descent_across_adjacent_cells(
    ...                values_at_nodes, return_node=True, method='d8')
    >>> ind
    array([0])

    >>> from landlab import RasterModelGrid
    >>> grid = RasterModelGrid(4, 4)
    >>> node_values = grid.zeros()
    >>> node_values[1] = -1
    >>> grid._calc_steepest_descent_across_adjacent_cells(node_values, 0)
    masked_array(data = [-1.],
                 mask = False,
           fill_value = 1e+20)
    <BLANKLINE>

    Get both the maximum gradient and the node to which the gradient is
    measured.

    >>> grid._calc_steepest_descent_across_adjacent_cells(
    ...     node_values, 0, return_node=True)
    (array([-1.]), array([1]))

    Use method to choose which neighbors to consider.

    >>> node_values[0] = -10.
    >>> node_values[1] = -1.
    >>> grid._calc_steepest_descent_across_adjacent_cells(
    ...     node_values, 0, method='d4', return_node=True)
    (array([-1.]), array([1]))
    >>> grid._calc_steepest_descent_across_adjacent_cells(
    ...     node_values, 0, method='d8', return_node=True)
    (array([-7.07106781]), array([0]))
    """
    method = kwds.pop('method', 'd4')
    _assert_valid_routing_method(method)

    if method == 'd4':
        return _calc_steepest_descent_across_cell_faces(
            grid, node_values, *args, **kwds)
    elif method == 'd8':
        neighbor_grads = _calc_steepest_descent_across_cell_faces(
            grid, node_values, *args, **kwds)
        diagonal_grads = _calc_steepest_descent_across_cell_corners(
            grid, node_values, *args, **kwds)

        return_node = kwds.pop('return_node', False)

        if not return_node:
            return np.choose(neighbor_grads <= diagonal_grads,
                             (diagonal_grads, neighbor_grads), **kwds)
        else:
            min_grads = np.choose(neighbor_grads[0] <= diagonal_grads[0],
                                  (diagonal_grads[0], neighbor_grads[0]),
                                  **kwds)
            node_ids = np.choose(neighbor_grads[0] <= diagonal_grads[0],
                                 (diagonal_grads[1], neighbor_grads[1]),
                                 **kwds)
            return (min_grads, node_ids)
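# With a boolean condition, np.choose(cond, (on_false, on_true)) as used above
# is equivalent to np.where(cond, on_true, on_false); a quick sketch:
import numpy as np
neighbor = np.array([-2.0, -0.5])
diagonal = np.array([-1.0, -3.0])
steepest = np.choose(neighbor <= diagonal, (diagonal, neighbor))
assert (steepest == np.where(neighbor <= diagonal, neighbor, diagonal)).all()
# elementwise, this keeps the more negative (steeper descent) gradient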
for name, est in estimators.items():
    est.fit(X)
    labels = est.labels_

    trace = go.Scatter3d(x=X[:, 0], y=X[:, 1], z=X[:, 2],
                         showlegend=False,
                         mode='markers',
                         marker=dict(
                                color=labels.astype(float),
                                line=dict(color='black', width=1)
        ))
    fig.append_trace(trace, 1, fignum)
    
    fignum = fignum + 1

y = np.choose(y, [0,1,2]).astype(float)

trace1 = go.Scatter3d(x=X[:, 0], y=X[:, 1], z=X[:, 2],
                      showlegend=False,
                      mode='markers',
                      marker=dict(
                            color=y,
                            line=dict(color='black', width=1)))
fig.append_trace(trace1, 2, 1)

fig['layout'].update(height=1400, width=1000,
                     margin=dict(l=10,r=10))

fig['layout']['scene1'].update(scene)
fig['layout']['scene2'].update(scene)
fig['layout']['scene3'].update(scene)
Exemple #52
0
def variadic_choose(a, *choices):
    return np.choose(a, choices)
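# Possible usage of this wrapper, passing each choice as a separate argument:
import numpy as np
idx = np.array([0, 2, 1])
variadic_choose(idx, 10, 20, 30)   # -> array([10, 30, 20])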
				rasterPoly = imageToArray(rasterPoly)
				if not rasterPoly:
					return
				
				#Remove the pixels from this mask that do not overlap
				#with the raster
				#get the indices for trimming the array
				minX = min(0, -pixExtent[0])
				maxX = max(cols, pixExtent[1])-cols-1
				minY = max(rows, pixExtent[3])-rows
				maxY = -(min(0, -pixExtent[3]) + 1)
				#Trim
				rasterPoly = rasterPoly[minX:maxX, minY:maxY]
		
				#Apply the mask to the raster
				masked  = np.choose(rasterPoly,(clip, 0), mode='raise').astype(d_type)
				
				#Get unique values and counts
				u, c = np.unique(masked, return_counts=True)
				
				U.append(u)
				C.append(c)
				
				count += 1
			
			#Combine all the values of the individual polygons to put in tabulate
			u = set(x for l in U for x in l) #Get all unique values for the multipolygon
			#Loop through the combined values and get the sum of counts
			for uval in u:
				cval = 0
				for i in range(len(C)):
def write_raster_inds(n, r, g, b, ds, sel, rgb_File):
    print(sel)
    gam = 1.7

    mask = numpy.greater(n + r + g + b, 0)
    if sel == 0:  #DVI
        # print 0
        inds = numpy.choose(mask, (0, n - r))
    elif sel == 1:  ##NDVI
        inds = numpy.choose(mask, (0, (n - r) / (n + r)))
#    elif sel==2:#GARI
#        inds=numpy.choose(mask, (0,(n-(g-gam*(b-r)))/(n+(g-gam*(b-r)))))
    elif sel == 2:  #GNDVI
        inds = numpy.choose(mask, (0, (n - g) / (n + g)))

    elif sel == 3:  #OSAVI
        inds = numpy.choose(mask, (0, 1.5 * (n - r) / (n + r + 0.16)))
    elif sel == 4:  #RDVI
        inds = numpy.choose(mask, (0, (n - r) / numpy.sqrt(n + r)))
#    elif sel==6: #RVI
#        inds= numpy.choose(mask, (0,n/r))
    elif sel == 5:  #SAVI
        inds = numpy.choose(mask, (0, 1.5 * (n - r) / (n + r + 0.5)))
    elif sel == 6:  #TDVI
        inds = numpy.choose(mask, (0, numpy.sqrt(0.5 + (n - r) / (n + r))))
    elif sel == 9:
        inds = numpy.choose(mask, (0, (n - g) / (n + g)))
    elif sel == 10:
        inds = (r - g) / (r + g)
    # inds=numpy.choose(mask, (0,(r - g)/(r + g)))
    elif sel == 11:
        inds = numpy.choose(mask,
                            (0, (n - 0.5 * (r + g)) / (n + 0.5 * (r + g))))
    elif sel == 12:
        inds = numpy.choose(mask, (0, numpy.sqrt((n - r) / (n + r) + 1)))
    elif sel == 13:
        inds = numpy.choose(mask, (0, n / g))
    else:
        inds = numpy.choose(mask, (0, 200 * (g * 100) /
                                   (g * 100 + r * 100 + b * 100)))

#ave_i=(max_i-ave_i)/50+ave_i
#    print "bbeeeffoorreeee"
#    print numpy.min(inds)
#    print numpy.max(inds)
#    print numpy.mean(inds)
#    print numpy.var(inds)
#    print "nnnooorrrmmmaaallliizzeeee"
#
    inds = inds - numpy.mean(inds)
    inds = inds / numpy.std(inds)

    #    inds=inds/numpy.max((inds))
    #
    #    print numpy.min(inds)
    #    print numpy.max(inds)
    #    print numpy.mean(inds)
    #    jjjjj
    ####normalize al together afterwards

    #    maxi=numpy.max(inds)
    #    mini=numpy.min(inds)
    #    inds=inds*2/(maxi-mini)
    #    inds=inds-1
    mask2 = numpy.equal(r + g + b, 0)
    inds += numpy.ones((inds.shape[0], inds.shape[1])) * 0.0001
    # print numpy.size(mask2)
    # ccrs=crs.from_string("EPSG:32647")
    inds[mask2] = numpy.inf
    src = rasterio.open(rgb_File)

    new_dataset = rasterio.open('Ind' + str(sel) + '.tif',
                                'w',
                                driver='Gtiff',
                                height=inds.shape[0],
                                width=inds.shape[1],
                                count=1,
                                dtype=str(inds.dtype),
                                crs=src.crs,
                                transform=src.transform)

    new_dataset.write(inds, 1)
    new_dataset.close()
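# Note: numpy.choose(mask, (0, expr)) as used above still evaluates expr at
# every pixel, so 0/0 at no-data pixels can emit runtime warnings; a sketch of
# the same NDVI masking with the warnings suppressed (data illustrative):
import numpy as np
n = np.array([0.0, 0.6, 0.8]); r = np.array([0.0, 0.2, 0.4])
mask = np.greater(n + r, 0)
with np.errstate(divide='ignore', invalid='ignore'):
    ndvi = np.choose(mask, (0, (n - r) / (n + r)))   # 0 where no data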
Exemple #55
0
def stineman_interp(xi, x, y, yp=None):
    """
    Given data vectors *x* and *y*, the slope vector *yp* and a new
    abscissa vector *xi*, the function :func:`stineman_interp` uses
    Stineman interpolation to calculate a vector *yi* corresponding to
    *xi*.

    Here's an example that generates a coarse sine curve, then
    interpolates over a finer abscissa::

      x = linspace(0,2*pi,20);  y = sin(x); yp = cos(x)
      xi = linspace(0,2*pi,40);
      yi = stineman_interp(xi,x,y,yp);
      plot(x,y,'o',xi,yi)

    The interpolation method is described in the article A
    CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
    W. Stineman. The article appeared in the July 1980 issue of
    Creative Computing with a note from the editor stating that while
    they were:

      not an academic journal but once in a while something serious
      and original comes in adding that this was
      "apparently a real solution" to a well known problem.

    For *yp* = *None*, the routine automatically determines the slopes
    using the :func:`slopes` routine.

    *x* is assumed to be sorted in increasing order.

    For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
    tries an extrapolation.  The relevance of the data obtained from
    this, of course, is questionable...

    Original implementation by Halldor Bjornsson, Icelandic
    Meteorolocial Office, March 2006 halldor at vedur.is

    Completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University or Regensburg, April
    2006 Norbert.Nemec at physik.uni-regensburg.de
    """

    # Cast key variables as float.
    x = np.asarray(x, np.float_)
    y = np.asarray(y, np.float_)
    assert x.shape == y.shape
    # N = len(y)

    if yp is None:
        yp = slopes(x, y)
    else:
        yp = np.asarray(yp, np.float_)

    xi = np.asarray(xi, np.float_)
    # yi = np.zeros(xi.shape, np.float_)

    # calculate linear slopes
    dx = x[1:] - x[:-1]
    dy = y[1:] - y[:-1]
    s = dy / dx  # note length of s is N-1 so last element is #N-2

    # find the segment each xi is in
    # this line actually is the key to the efficiency of this implementation
    idx = np.searchsorted(x[1:-1], xi)

    # now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
    # except at the boundaries, where it may be that xi[j] < x[0] or xi[j] >
    # x[-1]

    # the y-values that would come out from a linear interpolation:
    sidx = s.take(idx)
    xidx = x.take(idx)
    yidx = y.take(idx)
    xidxp1 = x.take(idx + 1)
    yo = yidx + sidx * (xi - xidx)

    # the difference that comes when using the slopes given in yp
    # using the yp slope of the left point
    dy1 = (yp.take(idx) - sidx) * (xi - xidx)
    # using the yp slope of the right point
    dy2 = (yp.take(idx + 1) - sidx) * (xi - xidxp1)

    dy1dy2 = dy1 * dy2
    # The following is optimized for Python. The solution actually
    # does more calculations than necessary but exploiting the power
    # of numpy, this is far more efficient than coding a loop by hand
    # in Python
    dy1mdy2 = np.where(dy1dy2, dy1 - dy2, np.inf)
    dy1pdy2 = np.where(dy1dy2, dy1 + dy2, np.inf)
    yi = yo + dy1dy2 * np.choose(
        np.array(np.sign(dy1dy2), np.int32) + 1,
        ((2 * xi - xidx - xidxp1) / ((dy1mdy2) * (xidxp1 - xidx)), 0.0, 1 /
         (dy1pdy2)))
    return yi
Exemple #56
0
    ax.set_title(titles[fignum - 1])
    ax.dist = 12
    fignum = fignum + 1

# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

for name, label in [('Setosa', 0),
                    ('Versicolour', 1),
                    ('Virginica', 2)]:
    ax.text3D(X[y == label, 3].mean(),
              X[y == label, 0].mean(),
              X[y == label, 2].mean() + 2, name,
              horizontalalignment='center',
              bbox=dict(alpha=.2, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y, edgecolor='k')

ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
ax.set_title('Ground Truth')
ax.dist = 12

fig.show()
Exemple #57
0
def calc_u_S(chi, g_0, mpy, non_mag):
    '''Calculates the interface matrix S and the u values.
    '''
    chi_xx, chi_xy, chi_xz = chi[0]
    chi_yx, chi_yy, chi_yz = chi[1]
    chi_zx, chi_zy, chi_zz = chi[2]
    # Check whether chi_xx already has the correct (broadcast) shape
    if len(chi_xx.shape) < 2:
        trans = np.ones(g_0.shape, dtype=np.complex128)
        chi_xx = trans * chi_xx[:, np.newaxis]
        chi_xy = trans * chi_xy[:, np.newaxis]
        chi_xz = trans * chi_xz[:, np.newaxis]
        chi_yx = trans * chi_yx[:, np.newaxis]
        chi_yy = trans * chi_yy[:, np.newaxis]
        chi_yz = trans * chi_yz[:, np.newaxis]
        chi_zx = trans * chi_zx[:, np.newaxis]
        chi_zy = trans * chi_zy[:, np.newaxis]
        chi_zz = trans * chi_zz[:, np.newaxis]

    n_x = np.sqrt(1.0 - g_0 ** 2)
    q1 = 1 + chi_zz
    q2 = n_x * (chi_xz + chi_zx)
    q3 = (chi_xz * chi_zx + chi_yz * chi_zy - (1 + chi_zz) * (g_0 ** 2 + chi_yy)
          - (1 + chi_xx) * (g_0 ** 2 + chi_zz))
    q4 = n_x * (chi_xy * chi_yz + chi_yx * chi_zy -
                (chi_xz + chi_zx) * (g_0 ** 2 + chi_yy))
    q5 = ((1 + chi_xx) * ((g_0 ** 2 + chi_yy) * (g_0 ** 2 + chi_zz) -
                          chi_yz * chi_zy)
          - chi_xy * chi_yx * (g_0 ** 2 + chi_zz)
          - chi_xz * chi_zx * (g_0 ** 2 + chi_yy)
          + chi_xy * chi_zx * chi_yz + chi_yx * chi_xz * chi_zy)
    # Note assuming C = 0 => q2 = 0 and q4 = 0
    # And this yields the following eigenstates
    #c = np.sqrt(q3**2 - 4*q1*q5)
    #u1 = np.sqrt((-q3 - c)/2.0/q1)
    #u2 = np.sqrt((-q3 + c)/2.0/q1)
    #u3 = -u1
    #u4 = -u2
    # End simplification
    # Proper solution to a 4'th degree polynomial
    u1, u2, u3, u4 = roots4thdegree(q1, q2, q3, q4, q5)
    #print u1[:,ind];print u2[:,ind];print u3[:,ind];print u4[:,ind]
    # Special case M||X for finding errors in the code:
    #u1 = -np.sqrt(g_0**2 + trans*(chi0 + A - B)[:, np.newaxis])
    #u2 = np.sqrt(g_0**2 + trans*(chi0 + A + B)[:, np.newaxis])
    #u3 = -np.sqrt(g_0**2 + trans*(chi0 + A + B)[:, np.newaxis])
    #u4 = np.sqrt(g_0**2 + trans*(chi0 + A - B)[:, np.newaxis])
    # End special case
    # I am lazy and simply sort the roots in ascending order to get the
    # right direction of the light later
    u_temp = np.array((u1, u2, u3, u4))
    pos = np.argsort(u_temp.imag, axis=0)
    u_temp = np.choose(pos, u_temp)
    u = np.zeros(u_temp.shape, dtype=np.complex128)
    u[0] = u_temp[3]
    u[1] = u_temp[2]
    u[2] = u_temp[1]
    u[3] = u_temp[0]

    D = ((chi_xz + u * n_x) * (chi_zx + u * n_x) -
         (1.0 - u ** 2 + chi_xx) * (g_0 ** 2 + chi_zz))
    # These expressions can throw a runtime warning if D has an incorrect value.
    # The special cases are handled later via the non-magnetic and M||Y flags.
    old_error_vals = np.seterr(invalid='ignore')
    P_x = (chi_xy * (g_0 ** 2 + chi_zz) -
           chi_zy * (chi_xz + u * n_x)) / D
    P_z = (chi_zy * (1.0 - u ** 2 + chi_xx) -
           chi_xy * (chi_zx + u * n_x)) / D
    np.seterr(invalid=old_error_vals['invalid'])

    S = np.zeros((4, 4, chi_xx.shape[0], g_0.shape[1]), dtype=np.complex128)
    # The ambient layer
    S[0, 0, 0] = 1.0
    S[0, 2, 0] = 1.0
    S[1, 1, 0] = 1.0
    S[1, 3, 0] = 1.0
    S[2, 0, 0] = g_0[0]
    S[2, 2, 0] = -g_0[0]
    S[3, 1, 0] = g_0[0]
    S[3, 3, 0] = -g_0[0]
    u[0, 0] = g_0[0]
    u[1, 0] = g_0[0]
    u[2, 0] = -g_0[0]
    u[3, 0] = -g_0[0]
    # The rest of the multilayers
    v = (u * P_x - n_x * P_z)[:, 1:]
    w = P_x[:, 1:]
    S[0, :, 1:] = 1.0
    S[1, :, 1:] = v
    S[2, :, 1:] = u[:, 1:]
    S[3, :, 1:] = w

    # Handle the layers that are non-magnetic
    # Removing to test if roughness calcs are affected
    chi = chi_xx[non_mag]
    nm_u1 = np.sqrt(g_0[non_mag] ** 2 + chi)
    nm_u2 = -nm_u1
    sqr_eps = np.sqrt(1 + chi)
    S[0, 0, non_mag] = 1.0
    S[0, 1, non_mag] = 0.0
    S[0, 2, non_mag] = 1.0
    S[0, 3, non_mag] = 0.0
    S[1, 0, non_mag] = 0.0
    S[1, 1, non_mag] = sqr_eps
    S[1, 2, non_mag] = 0.0
    S[1, 3, non_mag] = sqr_eps
    S[2, 0, non_mag] = nm_u1
    S[2, 1, non_mag] = 0.0
    S[2, 2, non_mag] = nm_u2
    S[2, 3, non_mag] = 0.0
    S[3, 0, non_mag] = 0.0
    S[3, 1, non_mag] = nm_u1 / sqr_eps
    S[3, 2, non_mag] = 0.0
    S[3, 3, non_mag] = nm_u2 / sqr_eps
    u[0, non_mag] = nm_u1
    u[1, non_mag] = nm_u1
    u[2, non_mag] = nm_u2
    u[3, non_mag] = nm_u2

    # Take into account the matrix singularity arising when M||Y
    if np.any(mpy):
        #print 'M||Y calcs activated'
        delta = chi_xz[mpy] ** 2 * (1 + chi_xx[mpy])
        nx = n_x[mpy]
        mpy_u1 = np.sqrt(g_0[mpy] ** 2 + chi_yy[mpy])
        mpy_u3 = -mpy_u1
        mpy_u2 = np.sqrt(g_0[mpy] ** 2 + chi_zz[mpy])
        mpy_u4 = -mpy_u2
        S[0, 0, mpy] = 1.0
        S[0, 1, mpy] = 0.0
        S[0, 2, mpy] = 1.0
        S[0, 3, mpy] = 0.0
        S[1, 0, mpy] = 0.0
        S[1, 1, mpy] = -(mpy_u2 * chi_xz[mpy] + nx * (1 + chi_xx[mpy])) / (nx ** 2 - delta)
        S[1, 2, mpy] = 0.0
        S[1, 3, mpy] = -(mpy_u4 * chi_xz[mpy] + nx * (1 + chi_xx[mpy])) / (nx ** 2 - delta)
        S[2, 0, mpy] = mpy_u1
        S[2, 1, mpy] = 0.0
        S[2, 2, mpy] = mpy_u3
        S[2, 3, mpy] = 0.0
        S[3, 0, mpy] = 0.0
        S[3, 1, mpy] = -(mpy_u2 * nx + chi_xz[mpy]) / (nx ** 2 - delta)
        S[3, 2, mpy] = 0.0
        S[3, 3, mpy] = -(mpy_u4 * nx + chi_xz[mpy]) / (nx ** 2 - delta)
        u[0, mpy] = mpy_u1
        u[1, mpy] = mpy_u2
        u[2, mpy] = mpy_u3
        u[3, mpy] = mpy_u4

    return u, S
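# np.choose(pos, u_temp) above, with pos = argsort along axis 0, sorts the four
# roots per column; a compact sketch of that trick:
import numpy as np
u = np.array([[3 + 2j, 1 + 0j],
              [1 + 1j, 2 + 3j],
              [2 + 0j, 3 + 1j]])
pos = np.argsort(u.imag, axis=0)   # per-column order by imaginary part
u_sorted = np.choose(pos, u)       # same as np.take_along_axis(u, pos, axis=0)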
def kmeans_cluster(pair_col, knn_cluster_col, order):
    model = KMeans(n_clusters=5)
    k_mean = rank_analysis_sct[[pair_col]]
    model.fit(k_mean)
    pred = np.choose(model.labels_, order).astype(np.int64)  # Assigning correct labels
    rank_analysis_sct[knn_cluster_col] = pred  # Adding column of cluster information to dataset
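# np.choose(model.labels_, order) above relabels raw KMeans cluster ids; a
# small sketch of the remapping (the order list is illustrative):
import numpy as np
labels = np.array([0, 2, 1, 0, 4, 3])   # raw cluster ids from KMeans
order = [3, 0, 4, 1, 2]                 # order[i] = desired label for cluster i
remapped = np.choose(labels, order)     # -> array([3, 4, 0, 3, 2, 1])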
    px.set_zlabel('Petal length')

    px.dist = 12
    fignum = fignum + 1

fig = plt.figure(fignum, figsize=(4, 3))
px = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
    px.text3D(P[q == label, 3].mean(),
              P[q == label, 0].mean(),
              P[q == label, 2].mean() + 2,
              name,
              horizontalalignment='center',
              bbox=dict(alpha=.2, edgecolor='w', facecolor='w'))
q = np.choose(q, [1, 3, 0]).astype(float)
px.scatter(P[:, 3], P[:, 0], P[:, 2], c=q, edgecolor='k')

px.w_xaxis.set_ticklabels([])
px.w_yaxis.set_ticklabels([])
px.w_zaxis.set_ticklabels([])
px.set_xlabel('Petal width')
px.set_ylabel('Sepal length')
px.set_zlabel('Petal length')
px.set_title('Ground Truth')
px.dist = 12

fig.show()

y = mydata['quality']

#let's estimate 8 clusters and tune

kmeans = cluster.KMeans(n_clusters=8)
kmeans.fit(x)

#let's define labels and centroids

labels = kmeans.labels_
centroids = kmeans.cluster_centers_

#print(labels)

#Now let's define predicted class labels
predY = np.choose(labels, [1, 2, 3, 4, 5, 6, 7, 8]).astype(np.int64)

#Let's evaluate the clusters

#Check accuracy score

accuracy_score = metrics.accuracy_score(y, predY)

#print accuracy_score

# Compute Silhouette Score
# Note: silhouette_score expects the feature matrix, not a 1-D label vector;
# passing y raised "Passing 1d arrays as data is deprecated in 0.17 and will
# raise ValueError in 0.19", so score against x instead
silhouette = metrics.silhouette_score(x, predY, metric='euclidean')

#print silhouette