    def _numpy(self, data, weights, shape):
        q = self.quantity(data)
        self._checkNPQuantity(q, shape)
        self._checkNPWeights(weights, shape)
        weights = self._makeNPWeights(weights, shape)
        newentries = weights.sum()

        import numpy

        selection = numpy.isnan(q)
        numpy.bitwise_not(selection, selection)
        subweights = weights.copy()
        subweights[selection] = 0.0
        self.nanflow._numpy(data, subweights, shape)

        # avoid nan warning in calculations by flinging the nans elsewhere
        numpy.bitwise_not(selection, selection)
        q = numpy.array(q, dtype=numpy.float64)
        q[selection] = float("-inf")
        weights = weights.copy()
        weights[selection] = 0.0

        selection = numpy.empty(q.shape, dtype=numpy.bool_)
        for threshold, sub in self.bins:
            numpy.less(q, threshold, selection)
            subweights[:] = weights
            subweights[selection] = 0.0

            sub._numpy(data, subweights, shape)

        # no possibility of exception from here on out (for rollback)
        self.entries += float(newentries)
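
The in-place mask negation above is easy to misread; here is a minimal standalone sketch of the same NaN-routing idiom, with made-up values and plain totals in place of the sub-aggregators:

import numpy

q = numpy.array([0.5, numpy.nan, 2.0, -1.0])
weights = numpy.ones(4)

selection = numpy.isnan(q)               # True where q is NaN
numpy.bitwise_not(selection, selection)  # flip in place: True where q is finite
subweights = weights.copy()
subweights[selection] = 0.0              # keep weight only for the NaN entries
print(subweights.sum())                  # 1.0 -> this is what nanflow receives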
 def _getinvisible(self):
     if self.invisible is not None:
         inv = self.invisible
     else:
         inv = np.zeros(len(self.atoms))
     if self.invisibilityfunction:
         inv = np.logical_or(inv, self.invisibilityfunction(self.atoms))
     r = self._getpositions()
     if len(r) > len(inv):
         # This will happen in parallel simulations due to ghost atoms.
         # They are invisible.  Hmm, this may cause trouble.
         i2 = np.ones(len(r))
         i2[:len(inv)] = inv
         inv = i2
         del i2
     if self.cut["xmin"] is not None:
         inv = np.logical_or(inv, np.less(r[:,0], self.cut["xmin"]))
     if self.cut["xmax"] is not None:
         inv = np.logical_or(inv, np.greater(r[:,0], self.cut["xmax"]))
     if self.cut["ymin"] is not None:
         inv = np.logical_or(inv, np.less(r[:,1], self.cut["ymin"]))
     if self.cut["ymax"] is not None:
         inv = np.logical_or(inv, np.greater(r[:,1], self.cut["ymax"]))
     if self.cut["zmin"] is not None:
         inv = np.logical_or(inv, np.less(r[:,2], self.cut["zmin"]))
     if self.cut["zmax"] is not None:
         inv = np.logical_or(inv, np.greater(r[:,2], self.cut["zmax"]))
     return inv
Example #3
def generateErrors(L,p):
    # Generate errors on each edge independently with probability p
    edgesX = np.less(np.random.rand(L,L),p) # Errors on horizontal edges
    edgesY = np.less(np.random.rand(L,L),p) # Errors on vertical edges

    n = np.sum(edgesX) + np.sum(edgesY)
##    print 'n = %d'%n
    
    A = findSyndromes(edgesX,edgesY,L)
##    print 'lattice'
##    printLattice(A,[],edgesX,edgesY,L)
    pairsA = findPairs(A,edgesX,edgesY,L)
    correctErrorsA(edgesX,edgesY,pairsA,L)
    A = findSyndromes(edgesX,edgesY,L)
##    print 'correctedLattice1'
##    printLattice(A,[],edgesX,edgesY,L)
    
    pairsA = findPairs(A,edgesX,edgesY,L)
    correctErrorsA(edgesX,edgesY,pairsA,L)
##    A = findSyndromes(edgesX,edgesY,L)
##    B = findSyndromesZ(edgesX,edgesY,L)
##    print 'correctedLattice2'
##    printLattice(A,[],edgesX,edgesY,L)
##    print logicalX(edgesX,L)
##    print logicalZ(edgesY,L)
    return logicalX(edgesX,L)&logicalZ(edgesY,L)
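
A standalone sketch of the error-generation step alone, assuming nothing beyond numpy (the syndrome and matching helpers above are external):

import numpy as np

L, p = 4, 0.1
edgesX = np.less(np.random.rand(L, L), p)   # True with probability p per edge
edgesY = np.less(np.random.rand(L, L), p)
print(np.sum(edgesX) + np.sum(edgesY), "errors generated")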
    def apply(self, pict):

        # get min diff & centroid assigned
        min_diff = np.multiply(np.ones_like(pict, 'float64'), -1)
        assigned = np.zeros_like(pict, 'uint8')
        new_bg = np.multiply(np.ones_like(pict, 'uint8'), 255)

        for i in range(self.K):
            # get diff
            cur_diff = np.multiply(np.ones_like(pict, 'float64'), ((pict - self.centroids[i]) ** 2))
            assigned = np.where(np.logical_or(np.equal(min_diff, -1), np.less(cur_diff, min_diff)), i, assigned)
            min_diff = np.where(np.logical_or(np.equal(min_diff, -1), np.less(cur_diff, min_diff)), cur_diff, min_diff)

        # update the centroids and weight
        for i in range(self.K):
            update_centroids = np.multiply(
                np.ones_like(pict, 'float64'),
                (np.add(self.centroids[i], self.alpha * np.subtract(pict, self.centroids[i])))
            )
            self.centroids[i] = np.where(np.equal(assigned, i), update_centroids, self.centroids[i])
            self.w[i] = np.where(np.equal(assigned, i), np.add(np.multiply((1. - self.alpha), self.w[i]), self.alpha),
                                 np.multiply((1. - self.alpha), self.w[i]))
            new_bg = np.where(np.logical_and(np.equal(assigned, i), np.greater(self.w[i], 1. / self.K)), 0, new_bg)

        return new_bg
def computePeakPowerPerChannel(lfp,Fs,stim_freq,t_start,t_end,freq_window):
	'''
	Input:
		- lfp: dictionary with one entry per channel of array of lfp samples
		- Fs: sample frequency in Hz
		- stim_freq: frequency to notch out when normalizing spectral power
		- t_start: time window start in units of sample number
		- t_end: time window end in units of sample number
		- freq_window: frequency band over which to look for peak power, should be of form [f_low,f_high]
	Output:
		- peak_power: an array of length equal to the number of channels, containing the peak power of each channel in 
					  the designated frequency band
	'''
	channels = lfp.keys()
	f_low = freq_window[0]
	f_high = freq_window[1]
	counter = 0
	peak_power = np.zeros(len(channels))
	
	for chann in channels:
		lfp_snippet = lfp[chann][t_start:t_end]
		num_timedom_samples = lfp_snippet.size
		freq, Pxx_den = signal.welch(lfp_snippet, Fs, nperseg=512, noverlap=256)
		norm_freq = np.append(np.ravel(np.nonzero(np.less(freq,stim_freq-3))),np.ravel(np.nonzero(np.greater(freq,stim_freq+3))))
		total_power_Pxx_den = np.sum(Pxx_den[norm_freq])
		Pxx_den = Pxx_den/total_power_Pxx_den

		freq_band = np.less(freq,f_high)&np.greater(freq,f_low)
		freq_band_ind = np.ravel(np.nonzero(freq_band))
		peak_power[counter] = np.max(Pxx_den[freq_band_ind])
		counter += 1

	return peak_power
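
Hypothetical usage with a synthetic single-channel recording (all values invented); a 20 Hz sinusoid should dominate the 15-25 Hz band:

import numpy as np
from scipy import signal

Fs = 1000.0
t = np.arange(4096) / Fs
lfp = {0: np.sin(2 * np.pi * 20 * t) + 0.1 * np.random.randn(t.size)}
peak = computePeakPowerPerChannel(lfp, Fs, stim_freq=60,
                                  t_start=0, t_end=4096, freq_window=[15, 25])
print(peak)   # normalized peak power near 20 Hz for the single channel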
def computeSTA(spike_file,tdt_signal,channel,t_start,t_stop):
	'''
	Compute the spike-triggered average (STA) for a specific channel over a designated time window
	[t_start,t_stop].

	spike_file should be the results of plx = plexfile.openFile('filename.plx') and spike_file = plx.spikes[:].data
	tdt_signal should be the array of time-stamped values just for this channel
	'''
	channel_spikes = [entry for entry in spike_file if (t_start <= entry[0] <= t_stop)&(entry[1]==channel)]
	units = [spike[2] for spike in channel_spikes]
	unit_vals = set(units)  # distinct unit labels
	unit_vals.discard(0) 	# unit 0 marks noise events
	unit_sta = dict()

	tdt_times = np.ravel(tdt_signal.times)
	tdt_data = np.ravel(tdt_signal)

	for unit in unit_vals:
		
		spike_times = [spike[0] for spike in channel_spikes if (spike[2]==unit)]
		start_avg = [(time - 1) for time in spike_times] 	# look 1 s back in time until 1 s forward in time from spike
		stop_avg = [(time + 1) for time in spike_times]
		epoch = np.logical_and(np.greater(tdt_times,start_avg[0]),np.less(tdt_times,stop_avg[0]))
		epoch_inds = np.ravel(np.nonzero(epoch))
		len_epoch = len(epoch_inds)
		sta = np.zeros(len_epoch)
		num_spikes = len(spike_times)
		for i in range(0,num_spikes):
			epoch = np.logical_and(np.greater(tdt_times,start_avg[i]),np.less(tdt_times,stop_avg[i]))
			epoch_inds = np.ravel(np.nonzero(epoch))
			if (len(epoch_inds) == len_epoch):
				sta += tdt_data[epoch_inds]
		unit_sta[unit] = sta/float(num_spikes)

	return unit_sta
    def on_epoch_end(self, epoch, logs={}):
        global DROPOUT_RATES
        assert hasattr(self.model.optimizer, 'lr'), \
            'Optimizer must have a "lr" attribute.'
        current = logs.get('val_loss')
        if not np.less(current, self.previous):
            if self.wait > self.patience:
                self.wait = 0.0
                lr = self.model.optimizer.get_config()["lr"]
                print(lr, type(lr))
                if self.verbose > 0:
                    print("decreasing learning rate %f to %f" % (lr, lr / 1.01))
                K.set_value(self.model.optimizer.lr, lr / self.lr_divide)
                # K.set_value(self.model.drop)  # broken call: K.set_value requires (variable, value)
            else:
                self.wait += 1
                print("increasing dropout rates: " + ",".join([str(i) for i in DROPOUT_RATES]))
                for i, j in enumerate(DROPOUT_RATES):
                    DROPOUT_RATES[i] = j * 1.05
                print("new dropout rates: " + ",".join([str(i) for i in DROPOUT_RATES]))
        else:
            self.wait = 0.0
            if np.less(current, self.best_loss):
                lr = self.model.optimizer.get_config()["lr"]
                print(lr, type(lr))
                K.set_value(self.model.optimizer.lr, lr * 1.01)
                print("increasing learning rate from %f to %f" % (lr, lr / 1.05))
                print("decreasing dropout rates: " + ",".join([str(i) for i in DROPOUT_RATES]))
                for i, j in enumerate(DROPOUT_RATES):
                    DROPOUT_RATES[i] = j / 1.05
                print("new dropout rates: " + ",".join([str(i) for i in DROPOUT_RATES]))
            elif self.verbose > 0:
                print("learning rate is good for now")

        self.previous = current
Example #8
def incident(lat, day, hour, tilt, direction, attenuate=False):
    """
    incident(lat, day, hour, tilt, direction) computes the normalized
    incident solar radiation of the beam on a panel with normal tilt
    relative to vertical and oriented at angle direction relative to
    true north.

    incident ~ cos X = cos(tilt)*cos(zen) + sin(tilt)*sin(zen)*cos(dir-az)

    The optional attenuate factor accounts for attenuation through the
    atmosphere, typically used in conjunction with computing radiation
    onto an object
    """
    zen  = zenith(lat, day, hour)
    az   = azimuth(lat, day, hour)
    zrad = np.radians(zen)
    trad   = np.radians(tilt)
    drad   = np.radians(direction - az)
    vert   = np.where(np.less(zen,90), np.cos(trad)*np.cos(zrad),0)
    hor    = np.where(np.less(zen,90), np.sin(trad)*np.sin(zrad)*np.cos(drad), 0)
    cosX = np.maximum(0,hor+vert)
    if (attenuate):
        if (attenuate == True): tau = 0.1
        else: tau = attenuate
        return cosX*np.exp(-tau/np.cos(zrad))
    else: 
        return cosX
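
The np.where/np.less daylight mask above, isolated with made-up zenith angles (zenith and azimuth are helpers defined elsewhere in the module):

import numpy as np

zen = np.array([30.0, 85.0, 95.0])   # degrees; the last is below the horizon
vert = np.where(np.less(zen, 90), np.cos(np.radians(zen)), 0)
print(vert)                          # third entry is 0: no direct beam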
Example #9
def analyzeFrame(bgrFrame):
    mutex.acquire()
    if lowerBound and upperBound:

        hsvFrame = cv2.cvtColor(bgrFrame, cv2.COLOR_BGR2HSV)
        centeredBox = hsvFrame[topLeft[1]:bottomLeft[1], topLeft[0]:topRight[0], :]
        boxFlat = centeredBox.reshape([-1, 3])
        numBroken = 0
        # Doing it this way removes the worry of checkInBounds changing while analyzing an individual frame
        # i.e., it won't take effect until the next frame.
        if boundType == 'in':
            for i in xrange(0, (boxFlat.shape)[0]):
                isGreaterLower = numpy.all(numpy.greater(boxFlat[i], lowerBound))
                isLessUpper = numpy.all(numpy.less(boxFlat[i], upperBound))
                if isGreaterLower and isLessUpper:
                    numBroken = numBroken + 1
        else:
            for i in xrange(0, (boxFlat.shape)[0]):
                isLessLower = numpy.all(numpy.less(boxFlat[i], lowerBound))
                isGreaterUpper = numpy.all(numpy.greater(boxFlat[i], upperBound))
                if isLessLower or isGreaterUpper:
                    numBroken = numBroken + 1

        if (float(numBroken) / area) >= threshold:
            sys.stderr.write('Exceeded\n')
            sys.stderr.flush()


    mutex.release()
Example #10
def mat(I,viewOutput = True):
    stretch = 0
    scale = 1
    npeaks = 1
    mi = imorlet(stretch,scale,0,npeaks)
    Gx = cv2.filter2D(I,-1,mi)
    mi = imorlet(stretch,scale,90,npeaks)
    Gy = cv2.filter2D(I,-1,mi)

    Gmag = np.sqrt(Gx*Gx+Gy*Gy)
    Gmag = Gmag/np.max(Gmag)
    
    Gdir = np.arctan2(Gy,Gx)/np.pi*180 # -180 to 180
    Gdir[np.less(Gdir,0)] = Gdir[np.less(Gdir,0)]+360 # 0 to 360

    H = Gdir
    S = np.ones(np.shape(H))
    V = Gmag

    if viewOutput:
	    nr,nc = np.shape(I)
	    HSV = np.zeros([nr,nc,3]).astype('float32')
	    HSV[:,:,0] = H
	    HSV[:,:,1] = S
	    HSV[:,:,2] = V

	    BGR = cv2.cvtColor(HSV, cv2.COLOR_HSV2BGR)

	    return Gmag, Gdir, BGR

    return Gmag, Gdir
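
The angle-wrapping step above in isolation, with made-up gradient directions:

import numpy as np

Gdir = np.array([-90.0, 45.0, 180.0, -1.0])    # degrees in (-180, 180]
Gdir[np.less(Gdir, 0)] += 360                  # remap to [0, 360)
print(Gdir)                                    # [270.  45. 180. 359.]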
Example #11
def finite_diff_array(fx, x, ix, order, window, out=None):  # pragma: no cover
    """Fornberg finite difference method for array of points `ix`.
    """
    fx = fx.astype(np.float64)

    w = window[0]
    if w < 0:  # use whole window
        for i, z in enumerate(ix):
            out[i] = diff_fornberg(fx, x, z, order[0])
    else:
        forward_limit = (x[0] + w / 2)
        foward_win = x[0] + w
        backward_limit = (x[-1] - w / 2)
        backward_win = x[-1] - w

        for i, z in enumerate(ix):
            if z < forward_limit:  # use forward diff
                bm = np.less(x, foward_win)
            elif z > backward_limit:  # backward diff
                bm = np.greater(x, backward_win)
            else:  # central diff
                bm = np.less(np.abs(x - z), w)
            wx = x[bm]
            wfx = fx[bm]
            out[i] = diff_fornberg(wfx, wx, z, order[0])
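
A small sketch of the boundary-window masks above (diff_fornberg is external; the grid and window width here are made up):

import numpy as np

x = np.linspace(0.0, 1.0, 11)
w = 0.35
bm_forward = np.less(x, x[0] + w)          # one-sided window at the left edge
bm_central = np.less(np.abs(x - 0.5), w)   # symmetric window in the interior
print(x[bm_forward])   # [0.  0.1 0.2 0.3]
print(x[bm_central])   # [0.2 0.3 0.4 0.5 0.6 0.7 0.8]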
Example #12
def testCompRSS():
    x1 = np.array([1,2,3])
    y1 = np.array([1,2,3])
    mod1 = lsr.LeastSquare(x1,y1)
    try:
        mod1.compRSS(x1, estimator="NormalFunction")
        print "FAILED to check input arguments!"
    except ValueError:
        print "check input arguments CORRECT!"
    try:
        mod1.compRSS(estimator='NormalFunction')
        print "FAILED to catch non-initialization error"
    except ValueError:
        print "check field variable initialization CORRECT!"
    mod1.normFunc()
    rssNF = mod1.compRSS(estimator='NormalFunction')
    epsilon = 1e-6
    if np.less(abs(rssNF), epsilon).all():
        print "compute RSS through normal function CORRECT!"
    else:
        print "FAILED to compute RSS correctly through normal function!"
    mod1.gradientDescent(step=0.05, iteration=150)
    rssGD = mod1.compRSS()
    if np.less(abs(rssGD), epsilon).all():
        print "compute RSS through gradient descent CORRECT!"
    else:
        print "FAILED to compute RSS correctly through gradient descent!"
Example #13
def alphabar(s, bw, bh, ori_deg, R=1.0, G=1.0, B=1.0):
	"""Generate a bar into existing sprite using the alpha channel.

	This fills the sprite with 'color' and then puts a [bw x bh] transparent
	bar of the specified orientation in the alpha channel.

	:param s: Sprite()

	:param bw,bh: (pixels) bar width and height

	:param ori_deg: (degrees) bar orientation

	:param R,G,B: (either R is colortriple or R,G,B are 0-1 values)

	:return: nothing (works in place)

	"""
	R, G, B = (np.array(unpack_rgb(None, R, G, B)) * 255.0).astype(int)
	r, t = genpolar(s.w, s.h, degrees=True)
	t += ori_deg
	x = r * np.cos(t)
	y = r * np.sin(t)
	s.fill((R,G,B))
	mask = np.where(np.less(abs(x), (bw/2.0)) * np.less(np.abs(y), (bh/2.0)),
					255, 0)
	s.alpha[::] = mask[::].astype(np.uint8)
Example #14
def prune_outside_window(boxlist, window):
  """Prunes bounding boxes that fall outside a given window.

  This function prunes bounding boxes that even partially fall outside the given
  window. See also ClipToWindow which only prunes bounding boxes that fall
  completely outside the window, and clips any bounding boxes that partially
  overflow.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a numpy array of size 4, representing [ymin, xmin, ymax, xmax]
            of the window.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in.
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """

  y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
  win_y_min = window[0]
  win_x_min = window[1]
  win_y_max = window[2]
  win_x_max = window[3]
  coordinate_violations = np.hstack([np.less(y_min, win_y_min),
                                     np.less(x_min, win_x_min),
                                     np.greater(y_max, win_y_max),
                                     np.greater(x_max, win_x_max)])
  valid_indices = np.reshape(
      np.where(np.logical_not(np.max(coordinate_violations, axis=1))), [-1])
  return gather(boxlist, valid_indices), valid_indices
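
A hypothetical standalone run of the violation test above, using plain arrays in place of BoxList (gather is part of the surrounding library):

import numpy as np

boxes = np.array([[0.1, 0.1, 0.4, 0.4],      # inside
                  [-0.2, 0.0, 0.5, 0.5]])    # y_min below the window
window = np.array([0.0, 0.0, 1.0, 1.0])
y_min, x_min, y_max, x_max = np.array_split(boxes, 4, axis=1)
violations = np.hstack([np.less(y_min, window[0]),
                        np.less(x_min, window[1]),
                        np.greater(y_max, window[2]),
                        np.greater(x_max, window[3])])
print(np.reshape(np.where(np.logical_not(np.max(violations, axis=1))), [-1]))  # [0]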
 def _findRobots(self):
     """ Finds the robots amoung the edges found
     """
     ## for each right edge find the next closest left edge. This forms an edge pair that could be robot 
     self.Robots = list()
     if len(self.RightEdges) == 0 or len(self.LeftEdges) == 0:
         return
         
     for rightedge in self.RightEdges:
         leftedge = self.LeftEdges[0]
         i = 1
         while leftedge < rightedge:
             if i >= len(self.LeftEdges):
                 break
             leftedge = self.LeftEdges[i]
             i = i + 1
             
         ## now calculate the distance between the two edges
         distance = self.__calculateDistanceBetweenEdges(leftedge, rightedge)
         
         if distance > self.MINIMUM_NAO_WIDTH and distance < self.MAXIMUM_NAO_WIDTH:
             x = self.CartesianData[0,rightedge:leftedge+1]
             y = self.CartesianData[1,rightedge:leftedge+1]
             r = self.PolarData[0,rightedge:leftedge+1]
             c = numpy.less(r, 409.5)
             x = numpy.compress(c, x)
             y = numpy.compress(c, y)                
             robotx = self.__averageObjectDistance(x)
             roboty = self.__averageObjectDistance(y)
             c = numpy.logical_and(numpy.less(numpy.fabs(x - robotx), self.MAXIMUM_NAO_WIDTH), numpy.less(numpy.fabs(y - roboty), self.MAXIMUM_NAO_WIDTH))
             x = numpy.compress(c, x)
             y = numpy.compress(c, y)
             robotr = math.sqrt(robotx**2 + roboty**2)
             robotbearing = math.atan2(roboty, robotx)
             self.Robots.append(Robot(robotx, roboty, robotr, robotbearing, x, y))
Example #16
 def mask_unique(self, coords, mask=None):
     """Masks points being unique in the unit cell.
     
     Args:
         coords: Cartesian coordinates of the points (atoms).
         mask: Only those coordinates are considered, where mask is True
         
     Returns:
        Logical list containing True for unique atoms and False otherwise.
     """
     if mask is not None:                     
         unique = np.array(mask, dtype=bool)
     else:
         unique = np.ones(( coords.shape[0], ), dtype=bool)                
     if self.period_type == "0D":
         return unique
     relcoords, shifts = self.splitcoords(coords)
     relcoords = np.where(
         np.greater(relcoords, 1.0 - nc.RELATIVE_PERIODIC_TOLERANCE), 
         relcoords - 1.0, relcoords)
     onbounds = np.flatnonzero(np.any(np.less(relcoords, 0.01), axis=1))
     onbounds_rel = relcoords[onbounds]
     onbounds_cart = np.dot(onbounds_rel, self.axis_cart) + shifts[onbounds]
     for ii in range(len(onbounds_cart)):
         if not unique[onbounds[ii]]:
             continue
         diff = onbounds_cart[ii+1:] - onbounds_cart[ii]
         equiv = np.flatnonzero(np.less(np.sum(diff**2, axis=1),
                                        nc.DISTANCE_TOLERANCE**2))
         unique[onbounds[equiv + ii + 1]] = False
     return unique
Example #17
def parallel_gibbs_sampler2(p, N_dim, N_t, N_samples, N_burnin, N_skip, init, dx, *args, **kwargs):
    '''
    What a year it's been. 
    This will return a number of samples from the probability function p
    obtained by gibbs sampling

    Parameters 
    ------
    p : function 
        A probability density over N_dim response variables that take values in {-1, 1}
    N_dim : int 
        Number of dimensions of response
    N_t : int 
        Number of time points in response  
    N_samples : int 
        Number of samples you want 
    N_burnin : int 
        Number of iterations to throw away at beginning 
    N_skip : int 
        Number of iterations to skip before taking a sample 
    init : array 
        starting point. N_dim x N_t

    Returns
    ------
    samples : array 
        Samples generated from p
        N_dim x N_t x N_samples
    '''

    resp = init 
    samples = np.zeros((N_dim, N_t, N_samples))
    n_accept = np.zeros((N_dim, N_t))
    # Burn in
    for step in range(N_burnin):
        for x in range(N_dim):
            resp[x, :] = 1
            raw_prob1 = p(resp, *args, **kwargs)
            resp[x, :] = -1
            raw_prob2 = p(resp, *args, **kwargs)
            prob1 = np.divide(raw_prob1,(raw_prob1+raw_prob2))
            to_1 = np.less(np.random.uniform(size=(1, N_t)), prob1)
            resp[x, to_1.flatten()] = 1
    iters=0
    samps=0
    while samps < N_samples:
        iters = iters+1
        for x in range(N_dim):
            resp[x, :] = 1
            raw_prob1 = p(resp, *args, **kwargs)
            resp[x, :] = -1
            raw_prob2 = p(resp, *args, **kwargs)
            prob1 = np.divide(raw_prob1,(raw_prob1+raw_prob2))
            to_1 = np.less(np.random.uniform(size=(1, N_t)), prob1)
            resp[x, to_1.flatten()] = 1
        if np.mod(iters, N_skip) == 0:
            samples[:, :, samps] = resp
            samps = samps+1
    return samples 
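
The per-coordinate Bernoulli update used twice above (burn-in and sampling), in a minimal standalone form with made-up probabilities:

import numpy as np

N_t = 6
prob1 = np.full((1, N_t), 0.7)                        # P(resp = 1) per time point
to_1 = np.less(np.random.uniform(size=(1, N_t)), prob1)
resp = np.where(to_1, 1, -1)
print(resp)   # roughly 70% ones in expectation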
Example #18
 def test_randn(self):
     # Simple distributional checks for sparse.randn.
     # Statistically, some of these should be negative
     # and some should be greater than 1.
     for random_state in None, 4321, np.random.RandomState():
         x = _sprandn(10, 20, density=0.5, dtype=np.float64, random_state=random_state)
         assert_(np.any(np.less(x.data, 0)))
         assert_(np.any(np.less(1, x.data)))
Example #19
 def __lt__(a, b):
     try:
         return np.less(a.v, b.v)
     except AttributeError:
         if isinstance(a, Measurement):
             return np.less(a.v, b)
         else:
             return np.less(a, b.v)
Example #20
def gradient_to_spherical(gx,gy):
	"""
	This function convert gradient coordinates of the 
	reflector into spherical coordinates of reflected rays
	on the unit sphere S2.
	
	Parameters
	----------
	gx : 1D array
		Gradients coordinate along x axis
	gy : 1D array
		Gradients coordinate along y axis
	
	Returns
	-------
	theta : 1D array
		Inclination angles (with respect to the
		positiv z axis). 0 <= theta <= pi
	phi : 1D array
		Azimuthal angles (projection of a direction
		in z=0 plane with respect to the x axis).
		0 <= phi <= 2pi
		
	See Also
	--------
	Inverse Methods for Illumination Optics, Corien Prins
	"""
	try:
		if len(gx.shape) > 1 or len(gy.shape) > 1:
			raise NotProperShapeError("gx and gy must be 1D arrays.")
		
		if gx.shape != gy.shape:
			raise NotProperShapeError("gx and gy must have the same length.")
			
		# theta computation
		num = gx*gx + gy*gy - 1
		denom = gx*gx + gy*gy + 1
		theta = np.arccos(num/denom)

		# phi computation
		zero = np.zeros(gx.shape)
		phi = np.zeros(gx.shape)
		J = np.logical_and(np.greater_equal(gx,zero),np.greater_equal(gy,zero))
		phi[J] = np.arctan(gy[J]/gx[J])
			
		J = np.less(gx, zero)
		phi[J] = np.arctan(gy[J]/gx[J]) + np.pi
			
		J = np.logical_and(np.greater_equal(gx, zero), np.less(gy, zero))
		phi[J] = np.arctan(gy[J]/gx[J]) + 2*np.pi
			
		return theta, phi
		
	except FloatingPointError:
		print("****gradient_to_spherical error: division by zero.")
		
	except NotProperShapeError as arg:
		print("****gradient_to_spherical error: ", arg.msg)
    def _numpy(self, data, weights, shape):
        q = self.quantity(data)
        self._checkNPQuantity(q, shape)
        self._checkNPWeights(weights, shape)
        weights = self._makeNPWeights(weights, shape)
        newentries = weights.sum()

        import numpy

        selection = numpy.isnan(q)
        numpy.bitwise_not(selection, selection)
        subweights = weights.copy()
        subweights[selection] = 0.0
        self.nanflow._numpy(data, subweights, shape)

        # avoid nan warning in calculations by flinging the nans elsewhere
        numpy.bitwise_not(selection, selection)
        q = numpy.array(q, dtype=numpy.float64)
        q[selection] = self.high
        weights = weights.copy()
        weights[selection] = 0.0

        numpy.greater_equal(q, self.low, selection)
        subweights[:] = weights
        subweights[selection] = 0.0
        self.underflow._numpy(data, subweights, shape)

        numpy.less(q, self.high, selection)
        subweights[:] = weights
        subweights[selection] = 0.0
        self.overflow._numpy(data, subweights, shape)

        if all(isinstance(value, Count) and value.transform is identity for value in self.values) and numpy.all(numpy.isfinite(q)) and numpy.all(numpy.isfinite(weights)):
            # Numpy defines histograms as including the upper edge of the last bin only, so drop that
            weights[q == self.high] = 0.0

            h, _ = numpy.histogram(q, self.num, (self.low, self.high), weights=weights)

            for hi, value in zip(h, self.values):
                value.fill(None, float(hi))

        else:
            q = numpy.array(q, dtype=numpy.float64)
            numpy.subtract(q, self.low, q)
            numpy.multiply(q, self.num, q)
            numpy.divide(q, self.high - self.low, q)
            numpy.floor(q, q)
            q = numpy.array(q, dtype=int)

            for index, value in enumerate(self.values):
                numpy.not_equal(q, index, selection)
                subweights[:] = weights
                subweights[selection] = 0.0
                value._numpy(data, subweights, shape)

        # no possibility of exception from here on out (for rollback)
        self.entries += float(newentries)
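
The fast path above zeroes weights at q == self.high because of numpy's binning convention; a quick standalone check of that convention:

import numpy

q = numpy.array([0.0, 0.5, 1.0])        # 1.0 sits exactly on the upper edge
h, edges = numpy.histogram(q, 2, (0.0, 1.0))
print(h)   # [1 2]: numpy puts the upper edge in the last bin, so the code
           # above must drop it to keep the last bin half-open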
def run(Xl_train, Xs_train, y_train, Xl_validate, Xs_validate, y_validate, Xs_test, num_lambdas=DEFAULT_NUM_LAMBDAS):
    lambda_guesses = np.power(np.e, np.arange(MIN_POWER, MAX_POWER, (MAX_POWER - MIN_POWER - 0.1) / num_lambdas))
    print "lambda_guesses", lambda_guesses

    Xs = np.vstack((Xs_train, Xs_validate, Xs_test))
    order_indices = np.argsort(np.reshape(np.array(Xs), Xs.size), axis=0)

    num_train = Xs_train.shape[0]
    num_train_and_validate = num_train + Xs_validate.shape[0]
    train_indices = np.reshape(np.array(np.less(order_indices, num_train)), order_indices.size)
    validate_indices = np.logical_and(
        np.logical_not(train_indices),
        np.reshape(np.array(np.less(order_indices, num_train_and_validate)), order_indices.size)
    )
    Xs_ordered = Xs[order_indices]

    def _get_reordered_data(train_data, validate_data):
        dummy_data = np.zeros((TEST_SIZE, train_data.shape[1]))
        combined_data = np.concatenate((train_data, validate_data, dummy_data))
        ordered_data = combined_data[order_indices]
        return ordered_data[train_indices], ordered_data[validate_indices]

    # need to reorder the rest of the data too now
    Xl_train_ordered, Xl_validate_ordered = _get_reordered_data(Xl_train, Xl_validate)
    y_train_ordered, y_validate_ordered = _get_reordered_data(y_train, y_validate)

    problem_wrapper = SmoothAndLinearProblemWrapper(Xl_train_ordered, Xs_ordered, train_indices, y_train_ordered, use_l1=False)

    best_beta = []   # initialize
    best_thetas = []   # initialize
    best_cost = 1e10  # initialize to something huge
    best_lambdas = [lambda_guesses[0]] * 3

    for l1 in lambda_guesses:
        for l2 in lambda_guesses:
            for l3 in lambda_guesses:
                lambdas = [l1, l2, l3]
                try:
                    beta, thetas = problem_wrapper.solve(lambdas, use_robust=False)
                except cvxpy.error.SolverError:
                    print "CANT SOLVE THIS ONE", lambdas
                    continue

                current_cost = testerror_smooth_and_linear(Xl_validate_ordered, y_validate_ordered, beta, thetas[validate_indices])
                print "gridsearch:", current_cost, "[l1, l2, l3]", lambdas
                if best_cost > current_cost:
                    best_cost = current_cost
                    best_beta = beta
                    best_thetas = thetas
                    best_lambdas = lambdas
                    print "gridsearch: best cost", best_cost, "best_lambdas", best_lambdas

    print "gridsearch: best_validation_error", best_cost
    print "gridsearch: best lambdas:", best_lambdas

    return best_beta, best_thetas, best_cost
Example #23
def rt_chisq(x, axis=None, warn=True):
    """Chi square fit for reaction times (a better summary statistic than mean)

    Parameters
    ----------
    x : array-like
        Reaction time data to fit.
    axis : int | None
        The axis along which to calculate the chi-square fit. If none, ``x``
        will be flattened before fitting.
    warn : bool
        If True, warn about possible bad reaction times.

    Returns
    -------
    peak : float | array-like
        The peak(s) of the fitted chi-square probability density function(s).

    Notes
    -----
    Verify that it worked by plotting pdf vs hist (for 1-dimensional x)::

        >>> import numpy as np
        >>> from scipy import stats as ss
        >>> import matplotlib.pyplot as plt
        >>> plt.ion()
        >>> x = np.abs(np.random.randn(10000) + 1)
        >>> lsp = np.linspace(np.floor(np.min(x)), np.ceil(np.max(x)), 100)
        >>> df, loc, scale = ss.chi2.fit(x, floc=0)
        >>> pdf = ss.chi2.pdf(lsp, df, scale=scale)
        >>> plt.plot(lsp, pdf)   # doctest: +ELLIPSIS
        [<matplotlib.lines.Line2D object at ...>]
        >>> _ = plt.hist(x, density=True)
    """
    x = np.asarray(x)
    if np.any(np.less(x, 0)):  # save the user some pain
        raise ValueError('x cannot have negative values')
    if axis is None:
        df, _, scale = ss.chi2.fit(x, floc=0)
    else:
        def fit(x):
            return np.array(ss.chi2.fit(x, floc=0))
        params = np.apply_along_axis(fit, axis=axis, arr=x)  # df, loc, scale
        pmut = np.concatenate((np.atleast_1d(axis),
                               np.delete(np.arange(x.ndim), axis)))
        df = np.transpose(params, pmut)[0]
        scale = np.transpose(params, pmut)[2]
    quartiles = np.percentile(x, (25, 75))
    whiskers = quartiles + np.array((-1.5, 1.5)) * np.diff(quartiles)
    n_bad = np.sum(np.logical_or(np.less(x, whiskers[0]),
                                 np.greater(x, whiskers[1])))
    if n_bad > 0 and warn:
        warnings.warn('{0} likely bad values in x (of {1})'
                      ''.format(n_bad, x.size))
    peak = np.maximum(0, (df - 2)) * scale
    return peak
Example #24
def radec(ra, dec, hours=""):
  """radec(ra, dec, hours="")
  Converts RA and Dec from decimal to sexagesimal units
  Returns a tuple (ihr, imin, xsec, imn, wsc)
  
  INPUTS:
    ra - right ascension, float or array, in degrees unless 
         "hours" is set
    dec - declination in decimal degrees, float or array, same
          number of elements as ra     
  
  OPTIONAL INPUT:
    hours - if set to true, then the right ascension input should
            be set to hours instead of degrees
            
  OUTPUTS:
    ihr - right ascension hours (float or array)
    imin - right ascension minutes (float or array)
    xsec - right ascension seconds (float or array)
    ideg - declination degrees (float or array)
    imn - declination minutes (float or array)
    xsc - declination seconds (float or array)         
  
  >>> radec(0,0) 
  (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
  """


  # Compute RA
  if(hours):
    ra =  numpy.mod(ra, 24)
    ra = ra + 24*(numpy.less(ra, 0) )
    ihr = numpy.fix(ra)
    xmin = numpy.abs(ra*60.0 - ihr*60.0)
  else:
    ra = numpy.mod(ra, 360)
    ra = ra + 360*(numpy.less(ra, 0))
    ihr = numpy.fix(ra/15.0)
    xmin = numpy.abs(ra*4.0 - ihr*60.0)

  imin = numpy.fix(xmin)
  xsec = (xmin - imin)*60.0

  # Compute Dec
  ideg = numpy.fix(dec)
  xmn = numpy.abs(dec - ideg)*60.0
  imn = numpy.fix(xmn)
  xsc = (xmn - imn)*60.0

  # Testing for Special Case of Zero Degrees

  zero_deg = numpy.equal(ideg, 0)  & numpy.less(dec, 0) 
  imn = imn - 2*imn*numpy.fix( zero_deg * ( numpy.not_equal(imn, 0) ) )
  xsc = xsc - 2 *xsc*zero_deg*(numpy.equal(imn, 0) )  

  return ihr, imin, xsec, ideg, imn, xsc
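
A hypothetical call checking the conversion by hand: 180 deg is 12 h of right ascension, and -30.5 deg is -30 deg 30 min:

print(radec(180.0, -30.5))   # (12.0, 0.0, 0.0, -30.0, 30.0, 0.0)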
Example #25
    def function(self,E) :
        """ Calculates the number of counts in barns"""
        
        if self.delta.value != self._previous_delta:
            self._previous_delta = copy.copy(self.delta.value)
            self.integrategos(self.delta.value)
            self.calculate_knots()

        if self._previous_effective_angle != self.effective_angle.value:
            self.integrategos()
            
        factor = 4.0 * np.pi * a0 ** 2.0 * R**2 / E / self.T #to convert to m**2/bin
        Emax = self.energyaxis[-1] + self.edgeenergy + \
        self.delta.value #maximum tabulated energy
        cts = np.zeros((len(E)))
        
        if self.fs_state is True:
            if self.__knots[-1] > Emax : Emax = self.__knots[-1]
            fine_structure_indices=np.logical_and(np.greater_equal(E, 
            self.edgeenergy+self.delta.value), 
            np.less(E, self.edgeenergy + self.delta.value + self.fs_emax))
            tabulated_indices = np.logical_and(np.greater_equal(E, 
            self.edgeenergy + self.delta.value + self.fs_emax), 
            np.less(E, Emax))
            if self.fs_mode == "new_spline" :
                cts = np.where(fine_structure_indices, 
                1E-25*splev(E,(self.__knots,self.fslist.value,3),0), cts)
            elif self.fs_mode == "spline" :
                cts = np.where(fine_structure_indices, 
                cspline1d_eval(self.fslist.value, 
                E, 
                dx = self.energy_scale / self.knots_factor, 
                x0 = self.edgeenergy+self.delta.value), 
                cts)
            elif self.fs_mode == "spline_times_edge" :
                cts = np.where(fine_structure_indices, 
                factor*splev((E-self.edgeenergy-self.delta.value), 
                self.__goscoeff)*cspline1d_eval(self.fslist.value, 
                E,dx = self.energy_scale / self.knots_factor, 
                x0 = self.edgeenergy+self.delta.value), 
                cts )
        else:
            tabulated_indices = np.logical_and(np.greater_equal(E, 
            self.edgeenergy + self.delta.value), np.less(E, Emax))            
        powerlaw_indices = np.greater_equal(E,Emax)  
        cts = np.where(tabulated_indices, 
        factor * splev((E-self.edgeenergy-self.delta.value), 
        self.__goscoeff),
         cts)
        
        # Convert to barns/dispersion.
        #Note: The R factor is introduced in order to give the same value
        # as DM, although it is not in the equations.
        cts = np.where(powerlaw_indices, self.A * E**-self.r, cts) 
        return (self.__subshell_factor * self.intensity.value * self.energy_scale 
        * 1.0e28 / R) * cts       
    def __init__(self,Lx,Ly,Re,CFL):
        """
        The constructor takes the following arguments
              
          *Lx*: float
            Length of the box in the x direction
        
          *Ly*: float
            Length of the box in the y direction
        
          *Re*: float
            Reynolds nuber based on *Lx* or *Ly
        
          *CFL*: float
            CFL number for temporal integration
        """
        self.Re = Re
        self.CFL = CFL
        # Estimate the Kolmogorov scale in the 2D turbulence: 
        # eta = cte/sqrt(Re) 
        nx = 2*numpy.round(0.64*Lx*numpy.sqrt(Re)/2)  # x-modes
        ny = 2*numpy.round(0.64*Ly*numpy.sqrt(Re)/2)  # y-modes
        self.dl = numpy.min([Lx/(nx-1),Ly/(ny-1)]) # minimum mesh size
        
        print 'Lx, Ly: ',Lx,', ',Ly
        print 'number of nx modes: ',nx
        print 'number of ny modes: ',ny
        
        # Low storage RK-4
        self.b   = numpy.array([0.0, 1.0/6, 1.0/3, 1.0/3, 1.0/6])  # floats: 1/6 is 0 under integer division
        self.a   = numpy.array([0.0, 0.5, 0.5, 1.0])
        self.dtv = CFL*self.dl**2*Re
        
        self.kx,self.ky = numpy.meshgrid(
            numpy.mod(numpy.arange(1,nx+1)-numpy.ceil(nx/2+1),nx)-\
                numpy.floor(nx/2),
            numpy.mod(numpy.arange(1,ny+1)-numpy.ceil(ny/2+1),ny)-\
                numpy.floor(ny/2))

        self.kx = self.kx*2*numpy.pi/Lx
        self.ky = self.ky*2*numpy.pi/Ly
        self.Lap = -(self.kx**2+self.ky**2)
        self.poisson = self.Lap
        self.poisson[0,0] = 1
        self.dealias = numpy.logical_and(
            numpy.less(numpy.abs(self.kx*Lx/(2*numpy.pi)),nx/3),
            numpy.less(numpy.abs(self.ky*Ly/(2*numpy.pi)),ny/3))
            
        self.t = 0
        self.dt = 0
        self.omega_hat = numpy.zeros(self.dealias.shape,dtype='complex')
        self.S1 = numpy.zeros(self.omega_hat.shape,dtype='complex')
        
        # Useful to create initial conditions
        self.nx = int(nx)
        self.ny = int(ny)
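
The 2/3-rule dealiasing mask built above, sketched on a tiny grid:

import numpy

nx = 8
k = numpy.fft.fftfreq(nx) * nx              # integer wavenumbers 0..3, -4..-1
dealias = numpy.less(numpy.abs(k), nx / 3.0)
print(dealias)   # keeps |k| < nx/3 and zeroes the aliasing-prone high modes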
Example #27
 def __lt__(self, other):
     if not isinstance(other, Point):
         other = Point(other)
     # origin = Point(nd=self.nd)
     # return self.euclidean_distance(origin) < \
     #     other.euclidean_distance(origin)
     return np.all(np.less(self.__array__(), other.__array__())) or \
         (np.any(np.less(self.__array__(), other.__array__())) and
          (self.x < other.x) or (self.x <= other.x and self.y < other.y) or
          (self.x <= other.x and self.y <= other.y and self.z < other.z))
def numpy_ufunc_test():
    X = np.arange(8)
    compResult = np.less(X, 3)
    reduc = np.add.reduce(compResult)

    print(X)
    print(compResult)
    print(reduc)

    print(np.add.reduce(np.less(X, 3)))
    def _numpy(self, data, weights, shape):
        q = self.quantity(data)
        self._checkNPQuantity(q, shape)
        self._checkNPWeights(weights, shape)
        weights = self._makeNPWeights(weights, shape)
        newentries = weights.sum()

        import numpy

        selection = numpy.isnan(q)
        numpy.bitwise_not(selection, selection)
        subweights = weights.copy()
        subweights[selection] = 0.0
        self.nanflow._numpy(data, subweights, shape)

        # avoid nan warning in calculations by flinging the nans elsewhere
        numpy.bitwise_not(selection, selection)
        q = numpy.array(q, dtype=numpy.float64)
        q[selection] = 0.0
        weights = weights.copy()
        weights[selection] = 0.0

        if all(isinstance(v, Count) and v.transform is identity for c, v in self.bins) and numpy.all(numpy.isfinite(q)) and numpy.all(numpy.isfinite(weights)):

            h, _ = numpy.histogram(q, [float("-inf")] + [(c1 + c2)/2.0 for (c1, v1), (c2, v2) in zip(self.bins[:-1], self.bins[1:])] + [float("inf")], weights=weights)

            for hi, (c, v) in zip(h, self.bins):
                v.fill(None, float(hi))

        else:
            selection = numpy.empty(q.shape, dtype=numpy.bool_)
            selection2 = numpy.empty(q.shape, dtype=numpy.bool_)

            for index in range(len(self.bins)):
                if index == 0:
                    high = (self.bins[index][0] + self.bins[index + 1][0])/2.0
                    numpy.greater_equal(q, high, selection)

                elif index == len(self.bins) - 1:
                    low = (self.bins[index - 1][0] + self.bins[index][0])/2.0
                    numpy.less(q, low, selection)

                else:
                    low = (self.bins[index - 1][0] + self.bins[index][0])/2.0
                    high = (self.bins[index][0] + self.bins[index + 1][0])/2.0
                    numpy.less(q, low, selection)
                    numpy.greater_equal(q, high, selection2)
                    numpy.bitwise_or(selection, selection2, selection)

                subweights[:] = weights
                subweights[selection] = 0.0
                self.bins[index][1]._numpy(data, subweights, shape)

        # no possibility of exception from here on out (for rollback)
        self.entries += float(newentries)
Example #30
def main(argv):

#  plt.ion()

  side_padding = 15
  sep_energy = 2109
  dep_energy = 1597
  binwidth = 0.5
  
  file_names = ["ms_event_set_runs11510-11530_mcmcfit.npz", "ms_event_set_runs11530-11560_mcmcfit.npz", "ms_event_set_runs11560-11570_mcmcfit.npz"]
  all_wfs = []
  for file_name in file_names:
    if os.path.isfile(file_name):
      data = np.load(file_name)
      all_wfs.append(  data['wfs'][:])
    else:
      print "no wf file named %s" % file_name
      exit(0)

  all_wfs = np.concatenate(all_wfs[:])
  energy_arr = np.zeros(all_wfs.size)
  like_arr = np.zeros(all_wfs.size)
  
  for (idx, wf) in enumerate(all_wfs):
    energy_arr[idx] = wf.energy
    like_arr[idx] = -1*wf.lnprob / wf.wfLength

  like_arr[ np.where( np.isnan(like_arr) == 1) ] = np.inf

  dep_idxs =  np.where(np.logical_and(np.less(energy_arr, 1800), np.isfinite(like_arr)))[0]
  r_arr = np.empty(len(dep_idxs))
  z_arr = np.empty(len(dep_idxs))
  like_arr_dep = like_arr[dep_idxs]

  for (new_idx, all_wf_idx) in enumerate(dep_idxs):
    samples = all_wfs[all_wf_idx].samples
    r_hist, r_bins = np.histogram(samples[:,0], bins=np.linspace(0, 33.8, 339 ))
    z_hist, z_bins = np.histogram(samples[:,2], bins=np.linspace(0, 39.3, 394 ))
    
    r_arr[new_idx] = r_bins[np.argmax(r_hist)]
    z_arr[new_idx] = z_bins[np.argmax(z_hist)]

  best_dep_idxs = np.where( np.less(like_arr_dep, 2) )[0]
  ok_dep_idxs = np.where(np.logical_and( np.greater(like_arr_dep, 2),np.less(like_arr_dep, 3) ))[0]
  bad_dep_idxs = np.where(np.greater(like_arr_dep, 3) )[0]

  plt.figure()
  plt.scatter(r_arr[best_dep_idxs], z_arr[best_dep_idxs], color="g")
  plt.scatter(r_arr[ok_dep_idxs], z_arr[ok_dep_idxs], color="b")
  plt.scatter(r_arr[bad_dep_idxs], z_arr[bad_dep_idxs], color="r")

  plt.xlim(0, 34)
  plt.ylim(0,38)

  plt.show()
Example #31
def delta_elu(z, alpha=1.0):
    z = np.asarray(z)
    return np.greater_equal(z, 0).astype(int) + \
                alpha * np.exp(np.minimum(z, 0)) * np.less(z, 0).astype(int)
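
Quick check with made-up inputs (alpha defaults to 1.0):

import numpy as np

print(delta_elu(np.array([-1.0, 0.0, 2.0])))   # [exp(-1), 1., 1.] ~ [0.368 1. 1.]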
Example #32
 def check_parameter(a, b):
     check = np.all(
         np.less(np.abs(np.array(a) - np.array(b)), TOL_CHECK))
     return check
Example #33
def main(inputFile='../../sounds/bendir.wav',
         window='hamming',
         M=2001,
         N=2048,
         t=-80,
         minSineDur=0.02,
         maxnSines=150,
         freqDevOffset=10,
         freqDevSlope=0.001,
         stocf=0.2):
    """
	inputFile: input sound file (monophonic with sampling rate of 44100)
	window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
	M: analysis window size; N: fft size (a power of two, greater than or equal to M)
	t: magnitude threshold of spectral peaks; minSineDur: minimum duration of sinusoidal tracks
	maxnSines: maximum number of parallel sinusoids
	freqDevOffset: frequency deviation allowed in the sinusoids from frame to frame at frequency 0
	freqDevSlope: slope of the frequency deviation, higher frequencies have bigger deviation
	stocf: decimation factor used for the stochastic approximation
	"""

    # size of fft used in synthesis
    Ns = 512

    # hop size (has to be 1/4 of Ns)
    H = 128

    # read input sound
    (fs, x) = UF.wavread(inputFile)

    # compute analysis window
    w = get_window(window, M, fftbins=True)

    # perform sinusoidal+stochastic analysis
    tfreq, tmag, tphase, stocEnv = SPS.spsModelAnal(x, fs, w, N, H, t,
                                                    minSineDur, maxnSines,
                                                    freqDevOffset,
                                                    freqDevSlope, stocf)

    # synthesize sinusoidal+stochastic model
    y, ys, yst = SPS.spsModelSynth(tfreq, tmag, tphase, stocEnv, Ns, H, fs)

    # output sound file (monophonic with sampling rate of 44100)
    outputFileSines = 'output_sounds/' + os.path.basename(
        inputFile)[:-4] + '_spsModel_sines.wav'
    outputFileStochastic = 'output_sounds/' + os.path.basename(
        inputFile)[:-4] + '_spsModel_stochastic.wav'
    outputFile = 'output_sounds/' + os.path.basename(
        inputFile)[:-4] + '_spsModel.wav'

    # write sounds files for sinusoidal, residual, and the sum
    UF.wavwrite(ys, fs, outputFileSines)
    UF.wavwrite(yst, fs, outputFileStochastic)
    UF.wavwrite(y, fs, outputFile)

    # create figure to plot
    plt.figure(figsize=(9, 6))

    # frequency range to plot
    maxplotfreq = 10000.0

    # plot the input sound
    plt.subplot(3, 1, 1)
    plt.plot(np.arange(x.size) / float(fs), x)
    plt.axis([0, x.size / float(fs), min(x), max(x)])
    plt.ylabel('amplitude')
    plt.xlabel('time (sec)')
    plt.title('input sound: x')

    plt.subplot(3, 1, 2)
    numFrames = int(stocEnv[:, 0].size)
    sizeEnv = int(stocEnv[0, :].size)
    frmTime = H * np.arange(numFrames) / float(fs)
    binFreq = (.5 * fs) * np.arange(sizeEnv * maxplotfreq /
                                    (.5 * fs)) / sizeEnv
    plt.pcolormesh(
        frmTime, binFreq,
        np.transpose(stocEnv[:, :int(sizeEnv * maxplotfreq / (.5 * fs) + 1)]))
    plt.autoscale(tight=True)

    # plot sinusoidal frequencies on top of stochastic component
    if (tfreq.shape[1] > 0):
        sines = tfreq * np.less(tfreq, maxplotfreq)
        sines[sines == 0] = np.nan
        numFrames = int(sines[:, 0].size)
        frmTime = H * np.arange(numFrames) / float(fs)
        plt.plot(frmTime, sines, color='k', ms=3, alpha=1)
        plt.xlabel('time(s)')
        plt.ylabel('Frequency(Hz)')
        plt.autoscale(tight=True)
        plt.title('sinusoidal + stochastic spectrogram')

    # plot the output sound
    plt.subplot(3, 1, 3)
    plt.plot(np.arange(y.size) / float(fs), y)
    plt.axis([0, y.size / float(fs), min(y), max(y)])
    plt.ylabel('amplitude')
    plt.xlabel('time (sec)')
    plt.title('output sound: y')

    plt.tight_layout()
    plt.ion()
    plt.show()
Example #34
    def getPositionPeridocityBroke(self, pt3d, jitter, cutoff):
        if autopack.biasedPeriodicity != None:
            biased = autopack.biasedPeriodicity
        else:
            biased = jitter
        O = numpy.array(self.boundingBox[0])
        E = numpy.array(self.boundingBox[1])
        P = numpy.array(pt3d)
        translation = None
        if not autopack.testPeriodicity:
            return None
        # distance to front-lower-left
        d1 = (P - O) * biased
        s1 = min(x for x in d1[d1 != 0] if x != 0)
        #        i1=list(d1).index(s1)
        m1 = numpy.logical_and(numpy.less(d1, cutoff), numpy.greater(d1, 0.0))
        i1 = numpy.nonzero(m1)[0]

        # distance to back-upper-right
        d2 = (E - P) * biased
        s2 = min(x for x in d2[d2 != 0] if x != 0)
        #        i2=list(d2).index(s2)
        m2 = numpy.logical_and(numpy.less(d2, cutoff), numpy.greater(d2, 0.0))
        i2 = numpy.nonzero(m2)[0]
        # first case to look for is the corner and return all positions
        if s1 < s2:  # closer to left bottom corner
            tr = []
            corner = numpy.zeros((4, 3))  # 7 corner / 3 corner 3D / 2D
            for i in i1:
                tr.append(pt3d + self.preriodic_table["left"][i])
                corner[0] += self.preriodic_table["left"][i]
                # the corner are
                # X+Y+Z corner[0]
                # X+Y+0 corner[1]
                # X+0+Z corner[2]
                # 0+Y+Z corner[3]
            if len(i1) == 2:
                tr.append(pt3d + corner[0])
            if len(i1) == 3:
                corner[1] = self.preriodic_table["left"][0] + self.preriodic_table["left"][1]
                corner[2] = self.preriodic_table["left"][0] + self.preriodic_table["left"][2]
                corner[3] = self.preriodic_table["left"][1] + self.preriodic_table["left"][2]
                for i in range(4):
                    if sum(corner[i]) != 0:
                        tr.append(pt3d + corner[i])
            translation = tr
            # i1 is the axis to use for the boundary
            #            if s1 < cutoff :
            #                translation=self.preriodic_table["left"][i1]
            return translation
        else:
            tr = []
            corner = numpy.zeros((4, 3))  # 7 corner / 3 corner 3D / 2D
            for i in i2:
                tr.append(pt3d + self.preriodic_table["right"][i])
                corner[0] += self.preriodic_table["right"][i]
            if len(i2) == 2:
                tr.append(pt3d + corner[0])
            if len(i2) == 3:
                corner[1] = self.preriodic_table["right"][0] + self.preriodic_table["right"][1]
                corner[2] = self.preriodic_table["right"][0] + self.preriodic_table["right"][2]
                corner[3] = self.preriodic_table["right"][1] + self.preriodic_table["right"][2]
                for i in range(4):
                    if sum(corner[i]) != 0:
                        tr.append(pt3d + corner[i])
            translation = tr
            # i1 is the axis to use for the boundary
            #            if s2 < cutoff :
            #                translation=self.preriodic_table["right"][i2]
            return translation
        return None
    def __init__(
        self,
        base_lr=0.001,
        max_lr=0.006,
        step_size=2000.0,
        mode="triangular",
        gamma=1.0,
        reduce_on_plateau=0,
        monitor="val_loss",
        reduce_factor=2,
        max_momentum=0.95,
        min_momentum=0.85,
        verbose=1,
    ):
        """
        References:
            Original Paper: https://arxiv.org/abs/1803.09820
            Blog Post: https://sgugger.github.io/the-1cycle-policy.html
            Code Reference:
                https://github.com/bckenstler/CLR
                https://github.com/amaiya/ktrain/blob/master/ktrain/lroptimize/triangular.py
        """
        super(Callback, self).__init__()

        self.base_lr = base_lr
        self.max_lr = max_lr
        self.step_size = step_size
        self.mode = mode
        self.gamma = gamma
        if self.mode == "triangular":
            self.scale_fn = lambda x: 1.0
            self.scale_mode = "cycle"
        elif self.mode == "triangular2":
            self.scale_fn = lambda x: 1 / (2.0**(x - 1))
            self.scale_mode = "cycle"
        elif self.mode == "exp_range":
            self.scale_fn = lambda x: gamma**x
            self.scale_mode = "iterations"
        self.clr_iterations = 0.0
        self.trn_iterations = 0.0
        self.history = {}
        self.orig_base_lr = None
        self.wait = 0

        # restoring weights due to CRF bug
        self.best_weights = None

        # LR reduction
        self.verbose = verbose
        self.patience = reduce_on_plateau
        self.factor = 1.0 / reduce_factor
        self.monitor = monitor
        if "acc" not in self.monitor:
            self.monitor_op = lambda a, b: np.less(a, b)
            self.best = np.inf
        else:
            self.monitor_op = lambda a, b: np.greater(a, b)
            self.best = -np.inf

        # annihilating LR
        self.overhump = False

        # cyclical momentum
        self.max_momentum = max_momentum
        self.min_momentum = min_momentum
        if self.min_momentum is None and self.max_momentum:
            self.min_momentum = self.max_momentum
        elif self.min_momentum and self.max_momentum is None:
            self.max_momentum = self.min_momentum
        self.cycle_momentum = True if self.max_momentum is not None else False

        self._reset()
def print_calipso_stats_ctype(caObj, statfile, val_subset, low_medium_high_class):
    if config.CCI_CLOUD_VALIDATION :
        logger.info("Cloudtype validation not useful for CCI validation")
        return
    if caObj.avhrr.cloudtype is None:
        logger.warning("There are no cloudtype data.")
        return
    # CLOUD TYPE EVALUATION - Based exclusively on CALIPSO data (Vertical Feature Mask)
    # =======================
    calipso_low = np.logical_and(low_medium_high_class['low_clouds'],
                                 val_subset)
    calipso_medium = np.logical_and(low_medium_high_class['medium_clouds'],
                                    val_subset)
    calipso_high = np.logical_and(low_medium_high_class['high_clouds'],
                                 val_subset)

    if  caObj.avhrr.cloudtype_conditions is not None: 
        logger.info("Assuming cloudtype structure from pps v2014")
        avhrr_low = np.logical_and(
            np.logical_and(np.greater_equal(caObj.avhrr.cloudtype,5),
                           np.less_equal(caObj.avhrr.cloudtype,6)),
            val_subset)
        avhrr_medium = np.logical_and(
            np.equal(caObj.avhrr.cloudtype,7), val_subset)
        avhrr_high_op = np.logical_and(
            np.logical_and(np.greater_equal(caObj.avhrr.cloudtype,8),
                           np.less_equal(caObj.avhrr.cloudtype,9)),
            val_subset)
        avhrr_high_semi = np.logical_and(
            np.logical_and(np.greater_equal(caObj.avhrr.cloudtype,11),
                           np.less_equal(caObj.avhrr.cloudtype,15)),
            val_subset)
        avhrr_high = np.logical_or(avhrr_high_op,avhrr_high_semi)
        avhrr_frac = np.logical_and(np.equal(caObj.avhrr.cloudtype,10), 
                                    val_subset)

    else:
        logger.info("Assuming cloudtype structure from pps v2012")
        avhrr_low = np.logical_and(
            np.logical_and(np.greater_equal(caObj.avhrr.cloudtype,5),
                           np.less_equal(caObj.avhrr.cloudtype,8)),
            val_subset)
        avhrr_medium = np.logical_and(
            np.logical_and(np.greater_equal(caObj.avhrr.cloudtype,9),
                           np.less_equal(caObj.avhrr.cloudtype,10)),
            val_subset)
        avhrr_high = np.logical_and(
            np.logical_and(np.greater_equal(caObj.avhrr.cloudtype,11),
                           np.less_equal(caObj.avhrr.cloudtype,18)),
            val_subset)
        avhrr_frac = np.logical_and(
            np.logical_and(np.greater_equal(caObj.avhrr.cloudtype,19),
                           np.less_equal(caObj.avhrr.cloudtype,19)),
            val_subset)

    calipso_clear = np.logical_and(
        np.less(caObj.calipso.cloud_fraction,0.34),val_subset)
    calipso_cloudy = np.logical_and(
        np.greater(caObj.calipso.cloud_fraction,0.66),val_subset)
    avhrr_clear = np.logical_and(
        np.logical_and(np.less_equal(caObj.avhrr.cloudtype,4),
                       np.greater(caObj.avhrr.cloudtype,0)),
        val_subset)
    
    
    # Notice that we have unfortunately changed order in notation compared to cloud mask
    # Here the PPS category is mentioned first and then the CALIOP category 

    # Each count is the number of pixels where both category masks hold
    # (True sums as 1, so .sum() counts the overlap).
    n_low_low = np.logical_and(calipso_low, avhrr_low).sum()
    n_low_medium = np.logical_and(calipso_medium, avhrr_low).sum()
    n_low_high = np.logical_and(calipso_high, avhrr_low).sum()
    n_medium_low = np.logical_and(calipso_low, avhrr_medium).sum()
    n_medium_medium = np.logical_and(calipso_medium, avhrr_medium).sum()
    n_medium_high = np.logical_and(calipso_high, avhrr_medium).sum()
    n_high_low = np.logical_and(calipso_low, avhrr_high).sum()
    n_high_medium = np.logical_and(calipso_medium, avhrr_high).sum()
    n_high_high = np.logical_and(calipso_high, avhrr_high).sum()
    n_frac_low = np.logical_and(calipso_low, avhrr_frac).sum()
    n_frac_medium = np.logical_and(calipso_medium, avhrr_frac).sum()
    n_frac_high = np.logical_and(calipso_high, avhrr_frac).sum()

    n_clear_low = np.logical_and(calipso_low, avhrr_clear).sum()
    n_clear_medium = np.logical_and(calipso_medium, avhrr_clear).sum()
    n_clear_high = np.logical_and(calipso_high, avhrr_clear).sum()
    n_low_clear = np.logical_and(calipso_clear, avhrr_low).sum()
    n_medium_clear = np.logical_and(calipso_clear, avhrr_medium).sum()
    n_high_clear = np.logical_and(calipso_clear, avhrr_high).sum()
    n_frac_clear = np.logical_and(calipso_clear, avhrr_frac).sum()

    if (n_low_low+n_medium_low+n_high_low+n_frac_low) > 0:
        pod_low = float(n_low_low + n_frac_low)/(n_low_low+n_medium_low+n_high_low+n_frac_low)
        far_low = float(n_medium_low+n_high_low)/(n_low_low+n_medium_low+n_high_low+n_frac_low)
    else:
        pod_low = -9.0
        far_low = -9.0
    if (n_low_medium+n_medium_medium+n_high_medium+n_frac_medium) > 0:
        pod_medium = float(n_medium_medium)/(n_low_medium+n_medium_medium+n_high_medium+n_frac_medium)
        far_medium = float(n_low_medium+n_high_medium+n_frac_medium)/(n_low_medium+n_medium_medium+n_high_medium+n_frac_medium)
    else:
        pod_medium = -9.0
        far_medium = -9.0
    if (n_low_high+n_medium_high+n_high_high+n_frac_high) > 0:
        pod_high = float(n_high_high)/(n_low_high+n_medium_high+n_high_high+n_frac_high)
        far_high = float(n_low_high+n_medium_high+n_frac_high)/(n_low_high+n_medium_high+n_high_high+n_frac_high)
    else:
        pod_high = -9.0
        far_high = -9.0

    statfile.write("CLOUD TYPE %s-IMAGER TABLE: %s %s %s %s %s %s %s %s %s %s %s %s \n" % (caObj.truth_sat.upper(),n_low_low,n_low_medium,n_low_high,n_medium_low,n_medium_medium,n_medium_high,n_high_low,n_high_medium,n_high_high,n_frac_low,n_frac_medium,n_frac_high))
    statfile.write("CLOUD TYPE %s-IMAGER PROB: %f %f %f %f %f %f \n" % (caObj.truth_sat.upper(),pod_low,pod_medium,pod_high,far_low,far_medium,far_high))
    statfile.write("CLOUD TYPE %s-IMAGER TABLE MISSED: %s %s %s %s %s %s %s \n" % (caObj.truth_sat.upper(),n_clear_low,n_clear_medium,n_clear_high,n_low_clear,n_medium_clear,n_high_clear,n_frac_clear))
Exemple #37
import uuid

import numpy as npy
import scipy.linalg


def dram(opts, cini, likTpr, lpinfo):
    """
    #
    # DRAM
    #
    Delayed Rejection Adaptive MCMC
    opts - dictionary of parameters for DRAM
           method : either 'am' (adaptive metropolis) or 'dram' (am+delayed rejection)
           nsteps : no. of mcmc steps
           nburn  : no. of mcmc steps for burn-in (proposal fixed to initial covariance)
           nadapt : adapt every nadapt steps after nburn
           nfinal : stop adapting after nfinal steps
           inicov : initial covariance
           coveps : small additive factor to ensure covariance matrix is positive definite
                    (only added to diagonal if covariance matrix is singular without it)
           burnsc : factor to scale up/down proposal if acceptance rate is too high/low
           gamma  : factor to multiply proposed jump size with in the chain past the burn-in phase
                    (Reduce this factor to get a higher acceptance rate.)
                    (Defaults to 1.0)
           ndr    : no. of delayed rejection steps (if dram is requested)
           drscale: scale factors for delayed rejection
           spllo  : lower bounds for chain samples
           splhi  : upper bounds for chain samples
    cini - starting mcmc state
    likTpr - log-posterior function
    lpinfo - dictionary with settings that will be passed to the log-posterior function

    Output:
      spls: chain samples (dimension nsteps x chain dimension)
      [cmode,pmode]: MAP estimate (cmode) and posterior at MAP estimate (pmode)
      [1.0-float(rej)/nsteps,
       1.0-float(rejlim)/nsteps]: acceptance ratio and fraction of samples inside the bounds
      [rej,rejlim]: total number of rejected samples and total number
                    of samples outside the bounds
      meta_info: acceptance probability and posterior probability for each sample (dimension nsteps x 2)

    To Do:
      Provide option to dump MCMC chain as the computations proceed, to avoid having such large
      files to hold all states, and so that partial output is available during the MCMC run for
      preliminary analysis.
    """
    # -------------------------------------------------------------------------------
    # Parse options
    # -------------------------------------------------------------------------------
    if 'method' in opts:
        method = opts['method']
    else:
        print('Error in dram: method unspecified!')
        quit()

    nsteps = opts['nsteps']
    nburn = opts['nburn']
    nadapt = opts['nadapt']
    nfinal = opts['nfinal']
    inicov = opts['inicov']
    coveps = opts['coveps']
    burnsc = opts['burnsc']
    spllo = opts['spllo']
    splhi = opts['splhi']

    if 'gamma' not in opts:
        gamma = 1.0  # Default for backwards compatibility
    else:
        gamma = opts['gamma']

    if method == 'dram':
        ndr = opts['ndr']
        drscale = opts['drscale']

    if 'ofreq' not in opts:
        ofreq = 10000  # Default for backwards compatibility
    else:
        ofreq = opts['ofreq']

    rej = 0     # number of samples rejected
    rejlim = 0  # number of samples rejected as out of prior bounds
    rejsc = 0   # number of samples rejected since the last rescaling
    # -------------------------------------------------------------------------------
    # Pre-processing
    # -------------------------------------------------------------------------------
    cdim = cini.shape[0]                  # chain dimensionality
    cov = npy.zeros((cdim, cdim))         # proposal covariance matrix
    spls = npy.zeros((nsteps, cdim))      # MCMC samples
    meta_info = npy.zeros((nsteps, 2))    # acceptance probability and posterior prob. per sample
    na = 0                                # counter for accepted jumps
    sigcv = 2.4 * gamma / npy.sqrt(cdim)  # covariance scaling factor
    spls[0] = cini                        # initial sample
    p1 = likTpr(spls[0], lpinfo)          # posterior probability of the initial sample
    meta_info[0] = [0.e0, p1]             # arbitrary initial acceptance; posterior of initial guess
    pmode = p1                            # current chain MAP probability value
    cmode = spls[0]                       # current MAP parameter set
    nref = 0                              # samples since last proposal rescaling
    # -------------------------------------------------------------------------------
    # Prepare temporary file
    # -------------------------------------------------------------------------------
    tmp_file = str(uuid.uuid4()) + '.dat'
    print('Saving intermediate chains to', tmp_file)
    # -------------------------------------------------------------------------------
    # Main loop
    # -------------------------------------------------------------------------------
    for k in range(nsteps - 1):
        #
        # Deal with covariance matrix
        #
        covMatUpd = False
        if k == 0:
            splmean = spls[0]
            propcov = inicov
            Rchol = scipy.linalg.cholesky(propcov)
            lastup = 1  # last covariance update
            covMatUpd = True
        else:
            if (nadapt > 0) and ((k + 1) % nadapt) == 0:
                if k < nburn:
                    if float(rejsc) / nref > 0.95:
                        Rchol = Rchol / burnsc  # scale down proposal
                        covMatUpd = True
                        print "Scaling down the proposal at step", k
                    elif float(rejsc) / nref < 0.05:
                        Rchol = Rchol * burnsc  # scale up proposal
                        covMatUpd = True
                        print "Scaling up the proposal at step", k
                    nref = 0
                    rejsc = 0
                else:
                    lastup, splmean, cov = ucov(
                        spls[lastup:lastup + nadapt, :], splmean, cov, lastup)
                    try:
                        Rchol = scipy.linalg.cholesky(cov)
                    except scipy.linalg.LinAlgError:
                        try:
                            # add to diagonal to make the matrix positive definite
                            Rchol = scipy.linalg.cholesky(cov + coveps *
                                                          npy.identity(cdim))
                        except scipy.linalg.LinAlgError:
                            print "Covariance matrix is singular even after the correction"
                    Rchol = Rchol * sigcv
                    covMatUpd = True
        if (method == 'dram') and covMatUpd:
            Rmat = [Rchol]
            invRmat = [scipy.linalg.inv(Rchol)]
            for i in range(1, ndr):
                Rmat.append(Rmat[i - 1] / drscale[i - 1])
                invRmat.append(invRmat[i - 1] * drscale[i - 1])
        #-Done with covariance matrix
        nref = nref + 1
        #
        # generate proposal and check bounds
        #
        u = spls[k] + npy.dot(npy.random.randn(1, cdim), Rchol)[0]
        if npy.any(npy.less(u, spllo)) or npy.any(npy.greater(u, splhi)):
            outofbound = True
            accept = False
            p2 = -1.e100  # Arbitrarily low posterior likelihood
            pr = -1.e100  # Arbitrarily low acceptance probability
        else:
            outofbound = False
        if not outofbound:
            p2 = likTpr(u, lpinfo)
            pr = npy.exp(p2 - p1)
            if (pr >= 1.0) or (npy.random.random_sample() <= pr):
                spls[k + 1] = u.copy()  # store the accepted sample
                meta_info[k + 1] = [pr, p2]  # and its meta information
                p1 = p2
                if p1 > pmode:
                    pmode = p1
                    cmode = spls[k + 1]
                accept = True
            else:
                accept = False
        #
        # See if we can do anything about a rejected proposal
        #
        if not accept:
            if (method == 'am'):
                # if 'am' then reject
                spls[k + 1] = spls[k]
                meta_info[k + 1, 0] = pr  # acceptance probability of the failed sample
                meta_info[k + 1, 1] = meta_info[k, 1]  # posterior of the retained sample k
                rej = rej + 1
                rejsc = rejsc + 1
                if outofbound:
                    rejlim = rejlim + 1
            elif (method == 'dram'):
                # try delayed rejection
                tryspls = [spls[k].copy(), u.copy()]
                trypost = [p1, p2]
                alpha = pr  # fallback if no delayed-rejection stage runs (ndr == 1)
                jdr = 1
                while (not accept) and (jdr < ndr):
                    jdr = jdr + 1
                    u = spls[k] + npy.dot(npy.random.randn(1, cdim),
                                          Rmat[jdr - 1])[0]
                    if npy.any(npy.less(u, spllo)) or npy.any(
                            npy.greater(u, splhi)):
                        outofbound = True
                        tryspls.append(u.copy())
                        trypost.append(-1.0e6)
                        continue
                    outofbound = False
                    p2 = likTpr(u, lpinfo)
                    tryspls.append(u.copy())
                    trypost.append(p2)
                    alpha = getAlpha(tryspls, trypost)
                    if (alpha >= 1.0) or (npy.random.random_sample() < alpha):
                        accept = True
                        spls[k + 1] = u.copy()  # store the accepted sample
                        meta_info[k + 1] = [alpha, p2]  # and its meta information
                        p1 = p2
                        if p1 > pmode:
                            pmode = p1
                            cmode = spls[k + 1]
                if not accept:
                    spls[k + 1] = spls[k]
                    meta_info[k + 1, 0] = alpha  # acceptance probability of the failed sample
                    meta_info[k + 1, 1] = meta_info[k, 1]  # posterior of the retained sample k
                    rej = rej + 1
                    rejsc = rejsc + 1
                    if outofbound:
                        rejlim = rejlim + 1
            else:
                print "Unknown MCMC method ", method, " -> Quit\n"
                quit()
            # Done with if over methods
        # Done with if over original accept
        if ((k + 1) % ofreq == 0):
            print('No. steps:', k + 1, ', No. of rej:', rej)
            fout = open(tmp_file, 'a+')
            npy.savetxt(fout,
                        spls[k - ofreq + 1:k + 1, :],
                        fmt='%.8e',
                        delimiter=' ',
                        newline='\n')
            fout.close()
    # Done loop over all steps

    # Return: samples; the MAP sample and its posterior probability; the overall
    # acceptance ratio and the fraction of samples inside the prior bounds; the
    # total number of rejected samples and of samples rejected as out of bounds;
    # and the per-sample meta information.
    return (spls, [cmode, pmode],
            [1.0 - float(rej) / nsteps,
             1.0 - float(rejlim) / nsteps], [rej, rejlim], meta_info)
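A hedged usage sketch for dram; everything below is invented for illustration, with option names taken from the docstring above. nburn is set equal to nsteps so the sketch stays within this snippet: the post-burn-in adaptation path calls a ucov() helper that is not shown here.

import numpy as npy

def toy_logpost(x, lpinfo):
    # standard normal log-density up to a constant (toy target)
    return -0.5 * npy.sum(x ** 2)

opts = {
    'method': 'am',     # adaptive Metropolis; 'dram' would add delayed rejection
    'nsteps': 2000,
    'nburn': 2000,      # keep the whole run in the burn-in branch (see note above)
    'nadapt': 100,
    'nfinal': 2000,
    'inicov': npy.eye(2),
    'coveps': 1e-8,
    'burnsc': 5.0,
    'spllo': npy.array([-10.0, -10.0]),
    'splhi': npy.array([10.0, 10.0]),
}

spls, (cmode, pmode), acc, nrej, meta = dram(opts, npy.zeros(2), toy_logpost, {})
print('acceptance ratio:', acc[0])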
Exemple #38
def _median(imageObjectList, paramDict):
    """Create a median image from the list of image Objects
       that has been given.
    """
    newmasks = paramDict['median_newmasks']
    comb_type = paramDict['combine_type'].lower()
    nlow = paramDict['combine_nlow']
    nhigh = paramDict['combine_nhigh']
    grow = paramDict['combine_grow'] if 'minmed' in comb_type else 0
    maskpt = paramDict['combine_maskpt']
    proc_units = paramDict['proc_unit']
    compress = paramDict['compress']
    bufsizeMB = paramDict['combine_bufsize']

    sigma = paramDict["combine_nsigma"]
    sigmaSplit = sigma.split()
    nsigma1 = float(sigmaSplit[0])
    nsigma2 = float(sigmaSplit[1])

    if paramDict['combine_lthresh'] is None:
        lthresh = None
    else:
        lthresh = float(paramDict['combine_lthresh'])

    if paramDict['combine_hthresh'] is None:
        hthresh = None
    else:
        hthresh = float(paramDict['combine_hthresh'])

    # The name of the output median file is defined in the output wcs object
    # and stored in the image.outputValues["outMedian"] dict of every imageObject
    medianfile = imageObjectList[0].outputNames["outMedian"]

    # Build combined array from single drizzled images.

    # Start by removing any previous products...
    if os.access(medianfile, os.F_OK):
        os.remove(medianfile)

    # Define lists for the instrument-specific parameters that live in the
    # image objects and need to be passed to the minmed routine
    readnoiseList = []
    exposureTimeList = []
    backgroundValueList = []  # list of MDRIZSKY * platescale values
    singleDrizList = []  # the input images
    singleWeightList = []  # pointers to the weight data arrays
    wht_mean = []  # mean value of each wht image, scaled by maskpt

    single_hdr = None
    virtual = None

    # for each image object
    for image in imageObjectList:
        if virtual is None:
            virtual = image.inmemory

        det_gain = image.getGain(1)
        img_exptime = image._image['sci', 1]._exptime
        native_units = image.native_units
        native_units_lc = native_units.lower()

        if proc_units.lower() == 'native':
            if native_units_lc not in [
                    'counts', 'electrons', 'counts/s', 'electrons/s'
            ]:
                raise ValueError(
                    "Unexpected native units: '{}'".format(native_units))

            if lthresh is not None:
                if native_units_lc.startswith('counts'):
                    lthresh *= det_gain
                if native_units_lc.endswith('/s'):
                    lthresh *= img_exptime

            if hthresh is not None:
                if native_units_lc.startswith('counts'):
                    hthresh *= det_gain
                if native_units_lc.endswith('/s'):
                    hthresh *= img_exptime

        singleDriz = image.getOutputName("outSingle")
        singleDriz_name = image.outputNames['outSingle']
        singleWeight = image.getOutputName("outSWeight")
        singleWeight_name = image.outputNames['outSWeight']

        # If compression was used, reference ext=1 as CompImageHDU only writes
        # out MEF files, not simple FITS.
        if compress:
            wcs_ext = '[1]'
            wcs_extnum = 1
        else:
            wcs_ext = '[0]'
            wcs_extnum = 0

        if not virtual:
            if isinstance(singleDriz, str):
                iter_singleDriz = singleDriz + wcs_ext
                iter_singleWeight = singleWeight + wcs_ext
            else:
                iter_singleDriz = singleDriz[wcs_extnum]
                iter_singleWeight = singleWeight[wcs_extnum]
        else:
            iter_singleDriz = singleDriz_name + wcs_ext
            iter_singleWeight = singleWeight_name + wcs_ext

        # read in WCS from first single drizzle image to use as WCS for
        # median image
        if single_hdr is None:
            if virtual:
                single_hdr = singleDriz[wcs_extnum].header
            else:
                single_hdr = fits.getheader(singleDriz_name,
                                            ext=wcs_extnum,
                                            memmap=False)

        single_image = iterfile.IterFitsFile(iter_singleDriz)
        if virtual:
            single_image.handle = singleDriz
            single_image.inmemory = True

        singleDrizList.append(single_image)  # add to an array for bookkeeping

        # If it exists, extract the corresponding weight images
        if (not virtual
                and os.access(singleWeight, os.F_OK)) or (virtual
                                                          and singleWeight):
            weight_file = iterfile.IterFitsFile(iter_singleWeight)
            if virtual:
                weight_file.handle = singleWeight
                weight_file.inmemory = True

            singleWeightList.append(weight_file)
            try:
                tmp_mean_value = ImageStats(weight_file.data,
                                            lower=1e-8,
                                            fields="mean",
                                            nclip=0).mean
            except ValueError:
                tmp_mean_value = 0.0
            wht_mean.append(tmp_mean_value * maskpt)

            # Extract instrument specific parameters and place in lists

            # If an image has zero exposure time we will
            # redefine that value as '1'.  Although this will cause inaccurate
            # scaling of the data to occur in the 'minmed' combination
            # algorithm, this is a necessary evil since it avoids divide-by-zero
            # exceptions.  It is more important that the divide-by-zero
            # exceptions not cause AstroDrizzle to crash in the pipeline than
            # it is to raise an exception for this obviously bad data, even
            # though this is not the type of data you would wish to process
            # with AstroDrizzle.
            #
            # Get the exposure time from the InputImage object
            #
            # MRD 19-May-2011
            # Changed exposureTimeList to take the exposure time from the
            # img_exptime variable instead of the image._exptime attribute,
            # since image._exptime was just giving 1.
            #
            exposureTimeList.append(img_exptime)

            # Use only "commanded" chips to extract subtractedSky and rdnoise:
            rdnoise = 0.0
            nchips = 0
            bsky = None  # minimum sky across **used** chips

            for chip in image.returnAllChips(extname=image.scienceExt):
                # compute sky value as sky/pixel using the single_drz
                # pixel scale:
                if bsky is None or bsky > chip.subtractedSky:
                    bsky = chip.subtractedSky * chip._conversionFactor

                # Extract the readnoise value for the chip
                rdnoise += chip._rdnoise**2
                nchips += 1

            if bsky is None:
                bsky = 0.0

            if nchips > 0:
                rdnoise = math.sqrt(rdnoise / nchips)

            backgroundValueList.append(bsky)
            readnoiseList.append(rdnoise)

            print("reference sky value for image '{}' is ".format(
                image._filename, backgroundValueList[-1]))
        #
        # END Loop over input image list
        #

    # create an array for the median output image, use the size of the first
    # image in the list. Store other useful image characteristics:
    single_driz_data = singleDrizList[0].data
    data_item_size = single_driz_data.itemsize
    single_data_dtype = single_driz_data.dtype
    imrows, imcols = single_driz_data.shape

    medianImageArray = np.zeros_like(single_driz_data)

    del single_driz_data

    if comb_type == "minmed" and not newmasks:
        # Issue a warning if minmed is being run with newmasks turned off.
        print('\nWARNING: Creating median image without the application of '
              'bad pixel masks!\n')

    # The overlap value needs to be set to 2*grow in order to
    # avoid edge effects when scrolling down the image, and to
    # ensure that the last section returned from the iterator
    # has enough rows to span the kernel used in the boxcar method
    # within minmed.
    overlap = 2 * grow
    buffsize = BUFSIZE if bufsizeMB is None else (BUFSIZE * bufsizeMB)
    section_nrows = min(imrows, int(buffsize / (imcols * data_item_size)))

    if section_nrows == 0:
        buffsize = imcols * data_item_size
        print("WARNING: Buffer size is too small to hold a single row.\n"
              "         Buffer size size will be increased to minimal "
              "required: {}MB".format(float(buffsize) / 1048576.0))
        section_nrows = 1

    if section_nrows < overlap + 1:
        new_grow = int((section_nrows - 1) / 2)
        if section_nrows == imrows:
            print("'grow' parameter is too large for actual image size. "
                  "Reducing 'grow' to {}".format(new_grow))
        else:
            print("'grow' parameter is too large for requested buffer size. "
                  "Reducing 'grow' to {}".format(new_grow))
        grow = new_grow
        overlap = 2 * grow

    nbr = section_nrows - overlap
    nsec = (imrows - overlap) // nbr
    if (imrows - overlap) % nbr > 0:
        nsec += 1

    for k in range(nsec):
        e1 = k * nbr
        e2 = e1 + section_nrows
        u1 = grow
        u2 = u1 + nbr

        if k == 0:  # first section
            u1 = 0

        if k == nsec - 1:  # last section
            e2 = min(e2, imrows)
            e1 = min(e1, e2 - overlap - 1)
            u2 = e2 - e1

        imdrizSectionsList = np.empty((len(singleDrizList), e2 - e1, imcols),
                                      dtype=single_data_dtype)
        for i, w in enumerate(singleDrizList):
            imdrizSectionsList[i, :, :] = w[e1:e2]

        if singleWeightList:
            weightSectionsList = np.empty(
                (len(singleWeightList), e2 - e1, imcols),
                dtype=single_data_dtype)
            for i, w in enumerate(singleWeightList):
                weightSectionsList[i, :, :] = w[e1:e2]
        else:
            weightSectionsList = None

        weight_mask_list = None

        if newmasks and weightSectionsList is not None:
            # Build new masks from single drizzled images.
            # Generate new pixel mask file for median step.
            # This mask will be created from the single-drizzled
            # weight image for this image.

            # The mean of the weight array will be computed and all
            # pixels with values less than 0.7 of the mean will be flagged
            # as bad in this mask. This mask will then be used when
            # creating the median image.
            # 0 means good, 1 means bad here...
            weight_mask_list = np.less(
                weightSectionsList,
                np.asarray(wht_mean)[:, None, None]).astype(np.uint8)

        if 'minmed' in comb_type:  # Do MINMED
            # set up use of 'imedian'/'imean' in minmed algorithm
            fillval = comb_type.startswith('i')

            # Create the combined array object using the minmed algorithm
            result = min_med(imdrizSectionsList,
                             weightSectionsList,
                             readnoiseList,
                             exposureTimeList,
                             backgroundValueList,
                             weight_masks=weight_mask_list,
                             combine_grow=grow,
                             combine_nsigma1=nsigma1,
                             combine_nsigma2=nsigma2,
                             fillval=fillval)

        else:  # DO NUMCOMBINE
            # Create the combined array object using the numcombine task
            result = numcombine.num_combine(imdrizSectionsList,
                                            masks=weight_mask_list,
                                            combination_type=comb_type,
                                            nlow=nlow,
                                            nhigh=nhigh,
                                            upper=hthresh,
                                            lower=lthresh)

        # Write out the processed image sections to the final output array:
        medianImageArray[e1 + u1:e1 + u2, :] = result[u1:u2, :]

    # Write out the combined image
    # use the header from the first single drizzled image in the list
    pf = _writeImage(medianImageArray, inputHeader=single_hdr)

    if virtual:
        mediandict = {}
        mediandict[medianfile] = pf
        for img in imageObjectList:
            img.saveVirtualOutputs(mediandict)
    else:
        try:
            print("Saving output median image to: '{}'".format(medianfile))
            pf.writeto(medianfile)
        except IOError:
            msg = "Problem writing file '{}'".format(medianfile)
            print(msg)
            raise IOError(msg)

    # Always close any files opened to produce median image; namely,
    # single drizzle images and singly-drizzled weight images
    #
    for img in singleDrizList:
        if not virtual:
            img.close()

    # Close all singly drizzled weight images used to create median image.
    for img in singleWeightList:
        if not virtual:
            img.close()
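A hedged sketch of the bad-pixel mask rule used for the median step above; the weight section and maskpt value are invented, but the rule mirrors the wht_mean thresholding (mean of the weight image times combine_maskpt):

import numpy as np

weight_demo = np.array([[0.2, 1.0, 1.1],
                        [0.9, 0.05, 1.2]])  # invented weight section
maskpt_demo = 0.7                           # fraction of the mean, like combine_maskpt
threshold = weight_demo.mean() * maskpt_demo

# 1 flags a bad (low-weight) pixel, 0 a good one, as in weight_mask_list
weight_mask_demo = np.less(weight_demo, threshold).astype(np.uint8)
print(weight_mask_demo)  # [[1 0 0]
                         #  [0 1 0]]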
Exemple #39
maxplotfreq = 15000.0

plt.subplot(4, 1, 1)
plt.plot(np.arange(x.size) / float(fs), x)
plt.axis([0, x.size / float(fs), min(x), max(x)])
plt.title('x (flute-A4.wav)')

plt.subplot(4, 1, 2)
maxplotbin = int(N * maxplotfreq / fs)
numFrames = int(mXr[:, 0].size)
frmTime = H * np.arange(numFrames) / float(fs)
binFreq = np.arange(maxplotbin + 1) * float(fs) / N
plt.pcolormesh(frmTime, binFreq, np.transpose(mXr[:, :maxplotbin + 1]))
plt.autoscale(tight=True)

harms = hfreq * np.less(hfreq, maxplotfreq)
harms[harms == 0] = np.nan
numFrames = int(harms[:, 0].size)
frmTime = H * np.arange(numFrames) / float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('harmonics + residual spectrogram')

plt.subplot(4, 1, 3)
maxplotbin = int(N * maxplotfreq / fs)
numFrames = int(mXr[:, 0].size)
frmTime = H * np.arange(numFrames) / float(fs)
binFreq = np.arange(maxplotbin + 1) * float(fs) / N
plt.pcolormesh(frmTime, binFreq, np.transpose(mXr[:, :maxplotbin + 1]))
plt.autoscale(tight=True)
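A hedged sketch of the NaN-masking trick used above for the harmonic tracks (arrays invented): out-of-range values are zeroed by the multiply, then turned into NaN so matplotlib leaves gaps instead of drawing lines down to zero.

import numpy as np

hfreq_demo = np.array([[440.0, 880.0, 16000.0],
                       [441.0, 882.0, 15990.0]])  # invented harmonic tracks
maxplot = 15000.0

harms_demo = hfreq_demo * np.less(hfreq_demo, maxplot)  # zero out-of-range bins
harms_demo[harms_demo == 0] = np.nan                    # NaNs create gaps when plotted
print(harms_demo)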
Exemple #40
def compute(flow, f, data):
    # Copy input tensors.
    v = {}
    for i in flow.inputs(f):
        if i.data is None:
            v[i] = np.asarray(data.tensor(i))
        else:
            v[i] = np.array(i.data, dtype=nptypes[i.type])

    # Get ops in computation order.
    _, ops = flow.order(f)

    # Compute ops using numpy.
    for op in ops:
        i = op.inputs
        o = op.outputs

        if op.type == "MatMul":
            v[o[0]] = np.matmul(v[i[0]], v[i[1]])
        elif op.type == "Exp":
            v[o[0]] = np.exp(v[i[0]])
        elif op.type == "Sigmoid":
            v[o[0]] = sigmoid(v[i[0]])
        elif op.type == "Log":
            v[o[0]] = np.log(v[i[0]])
        elif op.type == "Pow":
            v[o[0]] = np.power(v[i[0]], v[i[1]])
        elif op.type == "Erf":
            v[o[0]] = erf(v[i[0]])
        elif op.type == "Sin":
            v[o[0]] = np.sin(v[i[0]])
        elif op.type == "Cos":
            v[o[0]] = np.cos(v[i[0]])
        elif op.type == "Tan":
            v[o[0]] = np.tan(v[i[0]])
        elif op.type == "Asin":
            v[o[0]] = np.arcsin(v[i[0]])
        elif op.type == "Acos":
            v[o[0]] = np.arccos(v[i[0]])
        elif op.type == "Atan":
            v[o[0]] = np.arctan(v[i[0]])
        elif op.type == "Sinh":
            v[o[0]] = np.sinh(v[i[0]])
        elif op.type == "Cosh":
            v[o[0]] = np.cosh(v[i[0]])
        elif op.type == "Tanh":
            v[o[0]] = np.tanh(v[i[0]])
        elif op.type == "Asinh":
            v[o[0]] = np.arcsinh(v[i[0]])
        elif op.type == "Acosh":
            v[o[0]] = np.arccosh(v[i[0]])
        elif op.type == "Atanh":
            v[o[0]] = np.arctanh(v[i[0]])
        elif op.type == "Relu":
            v[o[0]] = relu(v[i[0]])
        elif op.type == "Sqrt":
            v[o[0]] = np.sqrt(v[i[0]])
        elif op.type == "Rsqrt":
            v[o[0]] = 1 / np.sqrt(v[i[0]])
        elif op.type == "Square":
            v[o[0]] = np.square(v[i[0]])
        elif op.type == "Neg":
            v[o[0]] = -v[i[0]]
        elif op.type == "Abs":
            v[o[0]] = np.abs(v[i[0]])
        elif op.type == "Sign":
            v[o[0]] = np.sign(v[i[0]])
        elif op.type == "Add":
            v[o[0]] = v[i[0]] + v[i[1]]
        elif op.type == "Sub":
            v[o[0]] = v[i[0]] - v[i[1]]
        elif op.type == "Mul":
            v[o[0]] = v[i[0]] * v[i[1]]
        elif op.type == "Div":
            v[o[0]] = np.divide(v[i[0]], v[i[1]]).astype(nptypes[o[0].type])
        elif op.type == "Minimum":
            v[o[0]] = np.minimum(v[i[0]], v[i[1]])
        elif op.type == "Maximum":
            v[o[0]] = np.maximum(v[i[0]], v[i[1]])
        elif op.type == "Reciprocal":
            v[o[0]] = np.divide(1, v[i[0]])
        elif op.type == "Floor":
            v[o[0]] = np.floor(v[i[0]])
        elif op.type == "Ceil":
            v[o[0]] = np.ceil(v[i[0]])
        elif op.type == "Round":
            v[o[0]] = np.round(v[i[0]])
        elif op.type == "Trunc":
            v[o[0]] = np.trunc(v[i[0]])
        elif op.type == "Sum":
            axis = op.attrs.get("axis")
            if axis is None:
                v[o[0]] = np.sum(v[i[0]])
            else:
                keepdims = bool(op.attrs.get("keepdims"))
                v[o[0]] = np.sum(v[i[0]], int(axis), keepdims=keepdims)
        elif op.type == "Max":
            axis = op.attrs.get("axis")
            if axis is None:
                v[o[0]] = np.max(v[i[0]])
            else:
                keepdims = bool(op.attrs.get("keepdims"))
                v[o[0]] = np.max(v[i[0]], int(axis), keepdims=keepdims)
        elif op.type == "Min":
            axis = op.attrs.get("axis")
            if axis is None:
                v[o[0]] = np.min(v[i[0]])
            else:
                keepdims = bool(op.attrs.get("keepdims"))
                v[o[0]] = np.min(v[i[0]], int(axis), keepdims=keepdims)
        elif op.type == "Product":
            axis = op.attrs.get("axis")
            if axis is None:
                v[o[0]] = np.prod(v[i[0]])
            else:
                keepdims = bool(op.attrs.get("keepdims"))
                v[o[0]] = np.prod(v[i[0]], int(axis), keepdims=keepdims)
        elif op.type == "All":
            axis = op.attrs.get("axis")
            if axis is None:
                v[o[0]] = np.all(v[i[0]])
            else:
                keepdims = bool(op.attrs.get("keepdims"))
                v[o[0]] = np.all(v[i[0]], int(axis), keepdims=keepdims)
        elif op.type == "Any":
            axis = op.attrs.get("axis")
            if axis is None:
                v[o[0]] = np.any(v[i[0]])
            else:
                keepdims = bool(op.attrs.get("keepdims"))
                v[o[0]] = np.any(v[i[0]], axis, keepdims=keepdims)
        elif op.type == "Count":
            v[o[0]] = np.array(np.count_nonzero(v[i[0]]), nptypes[o[0].type])
        elif op.type == "ArgMin":
            v[o[0]] = np.argmin(v[i[0]])
        elif op.type == "ArgMax":
            v[o[0]] = np.argmax(v[i[0]])
        elif op.type == "Equal":
            v[o[0]] = np.equal(v[i[0]], v[i[1]])
        elif op.type == "NotEqual":
            v[o[0]] = np.not_equal(v[i[0]], v[i[1]])
        elif op.type == "Less":
            v[o[0]] = np.less(v[i[0]], v[i[1]])
        elif op.type == "LessEqual":
            v[o[0]] = np.less_equal(v[i[0]], v[i[1]])
        elif op.type == "Greater":
            v[o[0]] = np.greater(v[i[0]], v[i[1]])
        elif op.type == "GreaterEqual":
            v[o[0]] = np.greater_equal(v[i[0]], v[i[1]])
        elif op.type == "And":
            v[o[0]] = np.logical_and(v[i[0]], v[i[1]])
        elif op.type == "Or":
            v[o[0]] = np.logical_or(v[i[0]], v[i[1]])
        elif op.type == "Xor":
            v[o[0]] = np.logical_xor(v[i[0]], v[i[1]])
        elif op.type == "Not":
            v[o[0]] = np.logical_not(v[i[0]])
        elif op.type == "Cond":
            v[o[0]] = np.where((v[i[0]] != 0), v[i[1]], v[i[2]])
        elif op.type == "Select":
            v[o[0]] = np.where((v[i[0]] != 0), v[i[1]], 0)
        elif op.type == "Transpose":
            if "perm" in op.attrs:
                perm = eval(op.attrs["perm"])
                v[o[0]] = np.transpose(v[i[0]], axes=perm)
            else:
                v[o[0]] = np.transpose(v[i[0]])
        elif op.type == "Shape":
            v[o[0]] = np.array(v[i[0]].shape)
        elif op.type == "Size":
            v[o[0]] = np.array(v[i[0]].size)
        elif op.type == "Rank":
            v[o[0]] = np.array(len(v[i[0]].shape))
        elif op.type == "Identity":
            v[o[0]] = v[i[0]]
        elif op.type == "ConcatV2":
            n = int(op.attr("N"))
            axis = v[i[n]]
            seq = []
            for k in range(n):
                seq.append(v[i[k]])
            v[o[0]] = np.concatenate(tuple(seq), axis)
        elif op.type == "Split":
            splits = np.split(v[i[0]], v[i[1]], v[i[2]])
            for k in range(len(splits)):
                v[o[k]] = splits[k]
        elif op.type == "Gather":
            v[o[0]] = gather(v[i[0]], v[i[1]])
        elif op.type == "Reshape":
            v[o[0]] = np.reshape(v[i[0]], v[i[1]])
        elif op.type == "Assign":
            v[i[0]] = v[i[1]]
        else:
            raise Exception("No NumPy support for " + op.type)

    # Return results.
    return v
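The long if/elif chain above is essentially an op dispatcher; a hedged sketch of the same idea as a lookup table (the op names mirror a few of the cases above, the rest is invented):

import numpy as np

# dispatch table mapping op type names to NumPy kernels
KERNELS = {
    "Add": lambda a, b: a + b,
    "Mul": lambda a, b: a * b,
    "Less": np.less,
    "Tanh": np.tanh,
}

def apply_op(op_type, *args):
    # look up the kernel for this op type and apply it to the inputs
    return KERNELS[op_type](*args)

print(apply_op("Less", np.array([1, 5]), np.array([3, 3])))  # [ True False]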
Exemple #41
def match_template(image,
                   template,
                   pad_input=False,
                   mode='constant',
                   constant_values=0):
    """Match a template to a 2-D or 3-D image using normalized correlation.

    The output is an array with values between -1.0 and 1.0. The value at a
    given position corresponds to the correlation coefficient between the image
    and the template.

    For `pad_input=True` matches correspond to the center and otherwise to the
    top-left corner of the template. To find the best match you must search for
    peaks in the response (output) image.

    Parameters
    ----------
    image : (M, N[, D]) array
        2-D or 3-D input image.
    template : (m, n[, d]) array
        Template to locate. It must be `(m <= M, n <= N[, d <= D])`.
    pad_input : bool
        If True, pad `image` so that output is the same size as the image, and
        output values correspond to the template center. Otherwise, the output
        is an array with shape `(M - m + 1, N - n + 1)` for an `(M, N)` image
        and an `(m, n)` template, and matches correspond to origin
        (top-left corner) of the template.
    mode : see `numpy.pad`, optional
        Padding mode.
    constant_values : see `numpy.pad`, optional
        Constant values used in conjunction with ``mode='constant'``.

    Returns
    -------
    output : array
        Response image with correlation coefficients.

    Notes
    -----
    Details on the cross-correlation are presented in [1]_. This implementation
    uses FFT convolutions of the image and the template. Reference [2]_
    presents similar derivations but the approximation presented in this
    reference is not used in our implementation.

    References
    ----------
    .. [1] J. P. Lewis, "Fast Normalized Cross-Correlation", Industrial Light
           and Magic.
    .. [2] Briechle and Hanebeck, "Template Matching using Fast Normalized
           Cross Correlation", Proceedings of the SPIE (2001).
           :DOI:`10.1117/12.421129`

    Examples
    --------
    >>> template = np.zeros((3, 3))
    >>> template[1, 1] = 1
    >>> template
    array([[ 0.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  0.]])
    >>> image = np.zeros((6, 6))
    >>> image[1, 1] = 1
    >>> image[4, 4] = -1
    >>> image
    array([[ 0.,  0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0., -1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.,  0.]])
    >>> result = match_template(image, template)
    >>> np.round(result, 3)
    array([[ 1.   , -0.125,  0.   ,  0.   ],
           [-0.125, -0.125,  0.   ,  0.   ],
           [ 0.   ,  0.   ,  0.125,  0.125],
           [ 0.   ,  0.   ,  0.125, -1.   ]])
    >>> result = match_template(image, template, pad_input=True)
    >>> np.round(result, 3)
    array([[-0.125, -0.125, -0.125,  0.   ,  0.   ,  0.   ],
           [-0.125,  1.   , -0.125,  0.   ,  0.   ,  0.   ],
           [-0.125, -0.125, -0.125,  0.   ,  0.   ,  0.   ],
           [ 0.   ,  0.   ,  0.   ,  0.125,  0.125,  0.125],
           [ 0.   ,  0.   ,  0.   ,  0.125, -1.   ,  0.125],
           [ 0.   ,  0.   ,  0.   ,  0.125,  0.125,  0.125]])
    """
    check_nD(image, (2, 3))

    if image.ndim < template.ndim:
        raise ValueError("Dimensionality of template must be less than or "
                         "equal to the dimensionality of image.")
    if np.any(np.less(image.shape, template.shape)):
        raise ValueError("Image must be larger than template.")

    image_shape = image.shape

    image = np.array(image, dtype=np.float64, copy=False)

    pad_width = tuple((width, width) for width in template.shape)
    if mode == 'constant':
        image = np.pad(image,
                       pad_width=pad_width,
                       mode=mode,
                       constant_values=constant_values)
    else:
        image = np.pad(image, pad_width=pad_width, mode=mode)

    # Use special case for 2-D images for much better performance in
    # computation of integral images
    if image.ndim == 2:
        image_window_sum = _window_sum_2d(image, template.shape)
        image_window_sum2 = _window_sum_2d(image**2, template.shape)
    elif image.ndim == 3:
        image_window_sum = _window_sum_3d(image, template.shape)
        image_window_sum2 = _window_sum_3d(image**2, template.shape)

    template_mean = template.mean()
    template_volume = np.prod(template.shape)
    template_ssd = np.sum((template - template_mean)**2)

    if image.ndim == 2:
        xcorr = fftconvolve(image, template[::-1, ::-1], mode="valid")[1:-1,
                                                                       1:-1]
    elif image.ndim == 3:
        xcorr = fftconvolve(image, template[::-1, ::-1, ::-1],
                            mode="valid")[1:-1, 1:-1, 1:-1]

    numerator = xcorr - image_window_sum * template_mean

    denominator = image_window_sum2
    np.multiply(image_window_sum, image_window_sum, out=image_window_sum)
    np.divide(image_window_sum, template_volume, out=image_window_sum)
    denominator -= image_window_sum
    denominator *= template_ssd
    np.maximum(denominator, 0,
               out=denominator)  # sqrt of negative number not allowed
    np.sqrt(denominator, out=denominator)

    response = np.zeros_like(xcorr, dtype=np.float64)

    # avoid zero-division
    mask = denominator > np.finfo(np.float64).eps

    response[mask] = numerator[mask] / denominator[mask]

    slices = []
    for i in range(template.ndim):
        if pad_input:
            d0 = (template.shape[i] - 1) // 2
            d1 = d0 + image_shape[i]
        else:
            d0 = template.shape[i] - 1
            d1 = d0 + image_shape[i] - template.shape[i] + 1
        slices.append(slice(d0, d1))

    return response[tuple(slices)]
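As a hedged cross-check of what the response values mean: with mean subtraction, normalized cross-correlation at a given offset equals the Pearson correlation between the template and the image patch under it (the toy arrays reuse the docstring example):

import numpy as np

template_demo = np.zeros((3, 3))
template_demo[1, 1] = 1.0
image_demo = np.zeros((6, 6))
image_demo[1, 1] = 1.0

patch = image_demo[0:3, 0:3]  # patch under the template at offset (0, 0)
r = np.corrcoef(patch.ravel(), template_demo.ravel())[0, 1]
print(round(r, 3))  # -> 1.0, matching result[0, 0] in the docstring example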
Exemple #42
def delta_leaky_relu(z, alpha=0.3):
    # derivative of the leaky ReLU: 1 for z >= 0, alpha for z < 0
    z = np.asarray(z)
    return np.greater_equal(z, 0).astype(int) + np.less(z, 0).astype(int) * alpha
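A quick usage sketch for delta_leaky_relu above (inputs invented); it returns the leaky-ReLU derivative, 1 for non-negative inputs and alpha elsewhere:

import numpy as np

z_demo = np.array([-2.0, 0.0, 3.0])
print(delta_leaky_relu(z_demo))              # -> [0.3 1.  1. ]
print(delta_leaky_relu(z_demo, alpha=0.01))  # -> [0.01 1.   1.  ]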
    def training(self):
        self.logger("[Info] 開始訓練")
        e_old = np.array([[0], [0]], dtype='f')
        if self.option == 1:
            self.logger("[Info] 使用題目訓練資料(8筆)")
            while not self.noChange and self.iteration < self.maxIteration:
                self.noChange = True
                e_new = np.array([[0], [0]], dtype='f')

                with open('data/training_data1.txt') as f:
                    for line in f:
                        cont = line.split()
                        p = np.array([[float(cont[0])], [float(cont[1])]],
                                     dtype='f')
                        t = np.array([[float(cont[2])], [float(cont[3])]],
                                     dtype='f')

                        a = np.add(np.dot(self.weight, p), self.bias)
                        e = t - a

                        self.weight = np.add(
                            self.weight,
                            2 * self.learningRate * np.dot(e, np.transpose(p)))
                        self.bias = np.add(self.bias,
                                           2 * self.learningRate * e)

                        e_new = np.add(e_new, e)

                e_new = e_new / 8
                if not np.all(
                        np.less(np.absolute(e_new - e_old), self.tolerate)):
                    self.noChange = False
                e_old = e_new
                self.iteration += 1
        elif self.option == 2:
            self.logger("[Info] 使用額外訓練資料(1000筆)")
            while not self.noChange and self.iteration < self.maxIteration:
                self.noChange = True
                e_new = np.array([[0], [0]], dtype='f')

                with open('data/training_data2.txt') as f:
                    for line in f:
                        cont = line.split()
                        self.trainingData.append(
                            [float(cont[0]),
                             float(cont[1]), cont[2]])
                        p = np.array([[float(cont[0])], [float(cont[1])]],
                                     dtype='f')
                        t = self.target2Matrix(str(cont[2]))

                        a = np.add(np.dot(self.weight, p), self.bias)
                        e = t - a

                        self.weight = np.add(
                            self.weight,
                            2 * self.learningRate * np.dot(e, np.transpose(p)))
                        self.bias = np.add(self.bias,
                                           2 * self.learningRate * e)

                        print(a)
                        print(e)
                        print(self.weight)
                        print(self.bias)
                        print("--------------")
                        time.sleep(1)

                        e_new = np.add(e_new, e)

                e_new = e_new / 1000
                if not np.all(
                        np.less(np.absolute(e_new - e_old), self.tolerate)):
                    self.noChange = False

                e_old = e_new
                self.iteration += 1
        self.printTrainingResult()
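A minimal sketch of the single update step used in both training loops above (the Widrow-Hoff/LMS rule; all numbers invented): the error e = t - a drives both the weight and the bias update.

import numpy as np

learning_rate = 0.1
weight = np.zeros((2, 2))
bias = np.zeros((2, 1))

p = np.array([[1.0], [2.0]])  # invented input pattern
t = np.array([[1.0], [0.0]])  # invented target

a = np.dot(weight, p) + bias  # linear response
e = t - a                     # error

weight = weight + 2 * learning_rate * np.dot(e, np.transpose(p))
bias = bias + 2 * learning_rate * e
print(weight)
print(bias)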
def CurveAreaAltezza(mydb_path_user,DamID,PathFiles):

    """
    The cript loads:
        - StreamDHFilled.tif
        - Tratti.tif         : grid with classes of stretches of river starting from the dam
                               the number represents in number of cells of
                               distance counted along the axis of the river

    counts for each section and for each dh with a 1-meter step in the number
    of cells underlying the difference in height dh from the river
    It also constructs the curves of the number of cumulated cells along the path of the
    river for every dh with 1 meter step

    Save counts in two csv files:
        - MatricePixel.csv
        - MatricePixCum.csv
    """


    NotErr = True
    errMsg = 'OK'

    PathFiles=os.path.realpath(PathFiles)

    # files output
    filecsv1=PathFiles+os.sep+'MatricePixel.csv'
    filecsv2=PathFiles+os.sep+'MatricePixCum.csv'

    # contains the percentage of area in the orographic right
    filecsvPixDestra=PathFiles+os.sep+'MatricePixDestra.csv'

    if not os.path.exists(PathFiles):
        errMsg = "Missing data for dam num = %s\nPerform the calculation of the downstream sections first!" % (DamID)
        NotErr = False
        return NotErr, errMsg

    StreamDH=PathFiles+os.sep+'StreamDHFilled.tif'
    if not os.path.exists(StreamDH):
        errMsg = "Missing for the dam num =%s the grid StreamDHFilled\nPerform first ModificaDH !" % (DamID)
        NotErr= bool()
        return NotErr, errMsg

    Tratti=PathFiles+os.sep+'Tratti.tif'
    if not os.path.exists(Tratti):
        errMsg = "Missing for the dam num =%s the Grid Tratti\nPerform first CreaSezInterpolate !" % (DamID)
        NotErr= bool()
        return NotErr, errMsg

    DestraSinistra=PathFiles+os.sep+'DestraSinistra.tif'
    if not os.path.exists(DestraSinistra):
        errMsg = "Missing for the dam num =%s the Grid DestraSinistra\nPerform first CreaSezInterpolate !" % (DamID)
        NotErr= bool()
        return NotErr, errMsg



    # ==================
    # Reading GRID
    # ==================

    gdal.AllRegister()

    indataset = gdal.Open(Tratti, GA_ReadOnly )
    if indataset is None:
        print ('Could not open ' + Tratti)
        sys.exit(1)

    geotransform = indataset.GetGeoTransform()

    originX = geotransform[0]
    originY = geotransform[3]
    pixelWidth = geotransform[1]
    pixelHeight = geotransform[5]
    cols=indataset.RasterXSize
    rows=indataset.RasterYSize
    bands=indataset.RasterCount
    iBand = 1
    inband = indataset.GetRasterBand(iBand)
    inNoData= inband.GetNoDataValue()

    cellsize=pixelWidth

    OriginData=[originX,originY,pixelWidth,pixelHeight,cols,rows]

    prj = indataset.GetProjectionRef()

    spatialRef = osr.SpatialReference()
    try:
        spatialRef.ImportFromWkt(prj)
    except:
        # fall back to a default CRS if the projection WKT cannot be parsed
        spatialRef.ImportFromEPSG(32632)

    TrattiArray = inband.ReadAsArray(0, 0, cols, rows).astype(int)

    TrattiVector_ini=np.unique(TrattiArray)

    inband = None
    indataset = None

    # Reading StreamDH
    # -----------------

    if not os.path.exists(StreamDH):
        errMsg = "File StreamDH %s does not exists" % os.path.realpath(StreamDH)
        NotErr= bool()
        return NotErr, errMsg

    infile=StreamDH

    indatasetElev = gdal.Open( infile, GA_ReadOnly )
    if indatasetElev is None:
        print ('Could not open ' + infile)
        sys.exit(1)

    prj = indatasetElev.GetProjectionRef()

    gt = indatasetElev.GetGeoTransform()

    ok=ControlloCongruenzaGRID(OriginData,indatasetElev,gt)

    if not ok:
        errMsg = 'Grid error: %s' % infile
        NotErr = False
        return NotErr, errMsg


    originXElev = gt[0]
    originYElev = gt[3]
    pixelWidthElev = gt[1]
    pixelHeightElev = gt[5]
    colsElev=indatasetElev.RasterXSize
    rowsElev=indatasetElev.RasterYSize
    bandsElev=indatasetElev.RasterCount
    iBand = 1
    inbandElev = indatasetElev.GetRasterBand(iBand)
    inNoDataElev= inbandElev.GetNoDataValue()

    # reading the entire file at once
    DH = inbandElev.ReadAsArray(0, 0, colsElev, rowsElev).astype(np.float32)
    mask_Nodata= DH==inNoDataElev

    TrattiVector_ini=np.unique(TrattiArray)

    # for consistency, apply the DH Nodata mask to the TrattiArray as well
    TrattiArray=np.choose(mask_Nodata,(TrattiArray,inNoData))

    TrattiVector_ini2=np.unique(TrattiArray)

    # eliminates negative values
    DH=np.choose(np.less(DH,0.0),(DH,0.0))

    inbandElev=None

    indatasetElev= None

    # Reading grid DestraSinistra
    # --------------------------
    indatasetDx = gdal.Open(DestraSinistra, GA_ReadOnly )
    if indatasetDx is None:
        errMsg = 'Could not open ' + DestraSinistra
        NotErr = False
        return NotErr, errMsg

    gtDx = indatasetDx.GetGeoTransform()

    ok=ControlloCongruenzaGRID(OriginData,indatasetDx,gtDx)

    if not ok:
        errMsg = 'Inconsistent grid: %s' % DestraSinistra
        NotErr = False
        return NotErr, errMsg

    originX = gtDx[0]
    originY = gtDx[3]
    pixelWidth = gtDx[1]
    pixelHeight = gtDx[5]
    cols=indatasetDx.RasterXSize
    rows=indatasetDx.RasterYSize
    bands=indatasetDx.RasterCount
    iBand = 1
    inbandDx = indatasetDx.GetRasterBand(iBand)
    inNoDataDx= inbandDx.GetNoDataValue()

    prj = indatasetDx.GetProjectionRef()

    spatialRef = osr.SpatialReference()
    try:
        spatialRef.ImportFromWkt(prj)
    except:
        # fall back to a default CRS if the projection WKT cannot be parsed
        spatialRef.ImportFromEPSG(32632)

    # reading array of the right fluvial zone = 1
    TrattiArrayDx = inbandDx.ReadAsArray(0, 0, cols, rows).astype(int)

    # creating the mask of the right area
    mask_Dx=np.equal(TrattiArrayDx,1)
    numDx=mask_Dx.sum()

    inbandDx = None
    indatasetDx = None


    # creates the list of river sections
    # ------------------------
    TrattiVector1=np.unique(TrattiArray)
    # discard data <0 (Nodata and data outside the river sections)
    mask=np.where(TrattiVector1>0)[0]

    # final array
    TrattiVector=TrattiVector1[mask]

    # Maximum height for which the curves are created
    Hmax=51

    MatricePix=[]
    VettorePixHmax=[]
    VettoreVolHmax=[]

    # Matrix of the percentage on the right
    MatricePixDx=[]

    for tratto in TrattiVector:
        # mask
        mask_tratto=np.equal(TrattiArray,tratto)

        numeropixel=mask_tratto.sum()
        VettorePix=[]
        VettorePixDx=[]
        for h in range(1,Hmax):
            mask=np.less_equal(DH,h) & mask_tratto
            nn=mask.sum()
            if nn>0:
                # select the pixels of the river section <h
                DH_cur=DH[np.where(mask)]
                # sum the heights: equivalent to the volume in terms of pixels * h
                Vol_h=np.sum(np.absolute(DH_cur),dtype=np.float32)
                # the empty volume is obtained by the difference
                Vol_d_valle=float(nn)*h-Vol_h

                VettorePix.append(Vol_d_valle)

                # finding those on the right hydrographic
                # ---------------------------------
                mask2=mask & mask_Dx
                ndx=mask2.sum()
                # select the pixels of the river section <h
                DH_curDx=DH[np.where(mask2)]
                # sum the heights: equivalent to the volume in terms of pixels * h
                Vol_h_Dx=np.sum(np.absolute(DH_curDx),dtype=np.float32)
                # the empty volume is obtained by the difference
                Vol_h_valleDx=float(ndx)*h-Vol_h_Dx

                if Vol_d_valle>0:
                    PercDx=Vol_h_valleDx/Vol_d_valle
                else:
                    PercDx=0.0
                VettorePixDx.append(PercDx)
            else:
                VettorePix.append(0.0)
                VettorePixDx.append(0.0)

        MatricePix.append(VettorePix)
        MatricePixDx.append(VettorePixDx)

    # creates the matrix of the areas accumulated at equal height from the riverbed
    MatricePixCum=[]
    nh=len(MatricePix[0])
    ndist=len(MatricePix)
    for j in range(nh):
        curva=[]
        Acum=0
        for i in range(ndist):
            Acum+=MatricePix[i][j]
            curva.append(Acum)
        MatricePixCum.append(curva)


    # save MatricePixel & MatricePixDx matrices
    fout=open(filecsv1,'w')
    foutDx=open(filecsvPixDestra,'w')

    txt='PixDist'
    hvals=[]
    for j in range(1,Hmax):
        nome='h=%.1f' % (j)
        hvals.append(nome)
        txt+=';h=%.1f' % (j)
    txt+='\n'
    fout.write(txt)
    foutDx.write(txt)

    nn=len(TrattiVector)
    for i in range(nn):
        txt='%d' % TrattiVector[i]
        row = MatricePix[i]
        for rec in row:
            txt+=';%.2f' % rec
        txt+='\n'
        fout.write(txt)

        rowDx = MatricePixDx[i]
        txt='%d' % TrattiVector[i]
        for rec in rowDx:
            txt+=';%.4f' % rec
        txt+='\n'
        foutDx.write(txt)

    fout.close()
    foutDx.close()

    # save MatricePixCum
    fout=open(filecsv2,'w')
    txt='PixDist'
    for j in range(1,Hmax):
        txt+=';h=%.1f' % (j)
    txt+='\n'
    fout.write(txt)
    nn=len(TrattiVector)
    for i in range(nn):
        txt='%d' % TrattiVector[i]
        for j in range(nh):
            txt+=';%.2f' % MatricePixCum[j][i]
        txt+='\n'
        fout.write(txt)
    fout.close()



    grafici=0
    if grafici>0:
        # ----------
        fontP = FontProperties()
        fontP.set_size('small')

        fig, ax = plt.subplots()
        nomi=[]
        x = np.array(TrattiVector, dtype=float)
        x = x * cellsize / 1000.0
        A1 = np.array(MatricePix, dtype=float)
        A1T=A1.T

        i=-1
        for curva in A1T:
            i=i+1
            ax.plot(x, curva, '-o')
            nomi.append(hvals[i])

        ax.legend( (nomi),loc='upper left',prop = fontP )
        ax.set_ylabel('Num. Cells',color='blue')
        ax.set_xlabel('Distance (km)',color='blue')
        txt='Area of the Valley for different heights from the river, starting from the dam\n'
        plt.title(txt)

        plt.grid()
        plt.show()

        # ---------
        # GRAPH 2
        fig, ax = plt.subplots()
        nomi=[]
        A = np.array(MatricePixCum, dtype=float)

        i=-1
        for curva in A:
            i=i+1
            ax.plot(x, curva, '-')
            nomi.append(hvals[i])

        ax.legend( (nomi),loc='upper left',prop = fontP )
        ax.set_ylabel('Num. Cells',color='blue')
        ax.set_xlabel('Distance (km)',color='blue')
        txt='Cumulated Valley Area for different heights from the river, starting from the dam\n'
        plt.title(txt)

        plt.grid()
        plt.show()

    return NotErr, errMsg
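A hedged sketch of the per-stretch volume count performed above, with an invented 1-D strip of DH values: for a level h, the empty volume (in pixel units) is n*h minus the sum of the cell heights at or below h.

import numpy as np

DH_demo = np.array([0.2, 0.8, 1.5, 3.0])  # invented height differences from the river
h = 2.0                                   # one of the 1-meter-step levels

mask = np.less_equal(DH_demo, h)
nn = mask.sum()                           # cells at or below level h
vol_empty = float(nn) * h - np.sum(DH_demo[mask])
print(vol_empty)  # 3*2.0 - (0.2 + 0.8 + 1.5) = 3.5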
    def numpy_operations(self):
        # list of program
        print()
        print("11. find the number of elements of an array, length of one array element""\n"
              "   in bytes and total bytes consumed by the elements")
        print()
        print("12. find common values between two arrays")
        print()
        print("13. find the set difference of two arrays. The set difference will return the sorted,""\n"
              "   unique values in array1 that are not in array2.")
        print()
        print("14. find the set exclusive-or of two arrays. Set exclusive-or will return the sorted,""\n"
              "   unique values that are in only one (not both) of the input arrays. ")
        print()
        print("15. compare two arrays using numpy")
        print()
        print("16. save a NumPy array to a text file")
        print()
        print("17. create a contiguous flattened array")
        print()
        print("18. change the data type of an array")
        print()
        print("19. create a 3-D array with ones on a diagonal and zeros elsewhere.")
        print()
        print("20.  program to create an array which looks like below array")
        print(np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [1, 1, 1]]))
        print()
        print("0. Exit")
        print("--------------------------------------------------------------------------------------")
        while True:
            try:
                print()
                # accept choice from user
                self.choice = input("Enter choice : ")
                # validate choice number
                valid_choice = validate_num(self.choice)
                if valid_choice:
                    choice = int(self.choice)
                    if choice == 11:
                        # array declaration
                        arr = np.array([10, 20, 30, 40, 50], dtype=float)
                        # get size of element
                        print("Number of element : ", arr.size)
                        # get length of element
                        print("Length of 1 element : ", arr.itemsize)
                        # get total bytes consumed by elements of array
                        print("Total bytes consumed by elements : ", arr.nbytes)
                    elif choice == 12:
                        arr1 = np.array([10, 20, 30, 40, 50])
                        print("Array 1: ", arr1)
                        arr2 = np.array([15, 25, 10, 35, 30])
                        print("Array 2: ", arr2)
                        print("Common values : ", np.intersect1d(arr1, arr2))
                    elif choice == 13:
                        arr1 = np.array([0, 10, 60, 40, 20, 80])
                        print("Array 1: ", arr1)
                        arr2 = np.array([10, 30, 40, 50, 70, 90])
                        print("Array 2: ", arr2)
                        # getting set difference between two arrays
                        print("Set difference between two arrays: ", np.setdiff1d(arr1, arr2))
                    elif choice == 14:
                        arr1 = np.array([0, 10, 20, 40, 60, 80])
                        print("Array 1: ", arr1)
                        arr2 = np.array([10, 30, 40, 50, 70])
                        print("Array 2: ", arr2)
                        print("set exclusive-or of two arrays : ", np.setxor1d(arr1, arr2))
                    elif choice == 15:
                        arr1 = np.array([1, 2])
                        print("Array 1: ", arr1)
                        arr2 = np.array([3, 4])
                        print("Array 2: ", arr2)
                        print("Array 1 < Array 2 : ", np.less(arr1, arr2))
                        print("Array 1 <= Array 2 : ", np.less_equal(arr1, arr2))
                        print("Array 1 > Array 2 : ", np.greater(arr1, arr2))
                        print("Array 1 >= Arrays 2 : ", np.greater_equal(arr1, arr2))
                    elif choice == 16:
                        a = np.arange(0, 10, 1)
                        # save array in text file
                        np.savetxt("array.txt", a[:], fmt="%d")
                        print("Array saved in array.txt file")
                        # open the file, print its contents, and close it
                        with open("array.txt", 'r') as f:
                            print(f.read())
                    elif choice == 17:
                        arr = np.array([[1, 2, 3], [4, 5, 6]])
                        print(arr)
                        # contiguous flattened array
                        arr_new = np.ravel(arr)
                        print("Contiguous Flattened array : ", arr_new)
                    elif choice == 18:
                        arr1 = np.array([1, 2, 3])
                        print("Data type of array : ", arr1.dtype)
                        # change data type of an array
                        new_arr = arr1.astype(float)
                        print("Data type of array after change : ", new_arr.dtype)
                    elif choice == 19:
                        # 3x3 array of 0 with 1 on diagonal (Identity matrix)
                        x = np.eye(3)
                        print(x)
                    elif choice == 20:
                        def get_pattern(matrix, row, col):
                            # iterate over rows
                            for r_count in range(0, row):
                                # iterate over cols
                                for c_count in range(0, col):
                                    # if row 1 or more and col less than row print 1
                                    if r_count > 0 and c_count < r_count:
                                        print("1", end=" ")
                                    else:
                                        # print zero's as it is
                                        print(matrix[r_count][c_count], end=" ")
                                print()

                        arr = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]])
                        r = 4
                        c = 3
                        # function call to get above pattern
                        get_pattern(arr, r, c)
                    elif choice == 0:
                        exit()
                    else:
                        print("Enter valid choice")
                else:
                    print("Enter only numbers")
            except Exception as e:
                print(e)
Exemple #46
0
    def _plotInternal(self):
        """Overrides baseclass implementation."""
        # Special case for custom boxfills:
        if self._gm.boxfill_type != "custom":
            self._plotInternalBoxfill()
        else:
            self._plotInternalCustomBoxfill()

        if self._maskedDataMapper is not None:
            self._mappers.insert(0, self._maskedDataMapper)

        x1, x2, y1, y2 = vcs.utils.getworldcoordinates(self._gm,
                                                       self._data1.getAxis(-1),
                                                       self._data1.getAxis(-2))

        # And now we need actors to actually render this thing
        actors = []
        for mapper in self._mappers:
            act = vtk.vtkActor()
            act.SetMapper(mapper)

            if self._vtkGeoTransform is None:
                # If using the geofilter, the wireframe does not get wrapped; not
                # sure why, so sticking to many mappers
                act = vcs2vtk.doWrap(act, [x1, x2, y1, y2],
                                     self._dataWrapModulo)

            # TODO We shouldn't need this conditional branch, the 'else' body
            # should be used and GetMapper called to get the mapper as needed.
            # If this is needed for other reasons, we need a comment explaining
            # why.
            if mapper is self._maskedDataMapper:
                actors.append([act, self._maskedDataMapper, [x1, x2, y1, y2]])
            else:
                actors.append([act, [x1, x2, y1, y2]])

            # create a new renderer for this mapper
            # (we need one for each mapper because of camera flips)
            ren = self._context.fitToViewport(
                act, [
                    self._template.data.x1, self._template.data.x2,
                    self._template.data.y1, self._template.data.y2
                ],
                wc=[x1, x2, y1, y2],
                geo=self._vtkGeoTransform,
                priority=self._template.data.priority)

        self._resultDict["vtk_backend_actors"] = actors

        t = self._originalData1.getTime()
        if self._originalData1.ndim > 2:
            z = self._originalData1.getAxis(-3)
        else:
            z = None
        self._resultDict.update(
            self._context.renderTemplate(self._template, self._data1, self._gm,
                                         t, z))

        if getattr(self._gm, "legend", None) is not None:
            self._contourLabels = self._gm.legend

        if self._gm.ext_1:
            if isinstance(self._contourLevels[0], list):
                if numpy.less(abs(self._contourLevels[0][0]), 1.e20):
                    ## Ok we need to add the ext levels
                    self._contourLevels.insert(0, [-1.e20, self._contourLevels[0][0]])
            else:
                if numpy.less(abs(self._contourLevels[0]), 1.e20):
                    ## need to add an ext
                    self._contourLevels.insert(0, -1.e20)
        if self._gm.ext_2:
            if isinstance(self._contourLevels[-1], list):
                if numpy.less(abs(self._contourLevels[-1][1]), 1.e20):
                    ## need ext
                    self._contourLevels.append(
                        [self._contourLevels[-1][1], 1.e20])
            else:
                if numpy.less(abs(self._contourLevels[-1]), 1.e20):
                    ## need exts
                    self._contourLevels.append(1.e20)

        self._resultDict.update(
            self._context.renderColorBar(self._template, self._contourLevels,
                                         self._contourColors,
                                         self._contourLabels, self._colorMap))

        if self._context.canvas._continents is None:
            self._useContinents = False
        if self._useContinents:
            projection = vcs.elements["projection"][self._gm.projection]
            self._context.plotContinents(x1, x2, y1, y2, projection,
                                         self._dataWrapModulo, self._template)
def get_subset_for_mode(caObj, mode):
    # First prepare possible subsetting of CALIOP datasets according to NSIDC
    # and IGBP surface types
    if mode == 'ICE_COVER_SEA':
        cal_subset = np.logical_and(
            np.logical_and(np.less_equal(caObj.calipso.nsidc_surface_type,100),
                           np.greater(caObj.calipso.nsidc_surface_type,10)),
            np.equal(caObj.calipso.igbp_surface_type,17))
    elif mode == 'ICE_FREE_SEA':
        cal_subset = np.logical_and(np.equal(caObj.calipso.nsidc_surface_type,0),np.equal(caObj.calipso.igbp_surface_type,17))
    elif mode == 'SNOW_COVER_LAND':
        cal_subset = np.logical_and(
            np.logical_and(np.less(caObj.calipso.nsidc_surface_type,104),
                           np.greater(caObj.calipso.nsidc_surface_type,10)),
            np.not_equal(caObj.calipso.igbp_surface_type,17))
        # Notice that some uncertainty remains about the meaning of IGBP category
        # 15 = "snow and ice". Can this possibly include also the Arctic ice sheet?
        # We hope that it is not!!! However, if it is, the whole classification here
        # might be wrong since this will affect also the definition of IGBP
        # category 17. /KG
    elif mode == 'SNOW_FREE_LAND':
        cal_subset = np.logical_and(np.equal(caObj.calipso.nsidc_surface_type,0),
                                    np.not_equal(caObj.calipso.igbp_surface_type,17))
    elif mode == 'COASTAL_ZONE':
        cal_subset = np.equal(caObj.calipso.nsidc_surface_type,255)
    
    elif mode == 'TROPIC_ZONE':
        cal_subset = np.abs(caObj.calipso.latitude) <= 10
    elif mode == 'TROPIC_ZONE_SNOW_FREE_LAND':
        cal_subset_lat = np.logical_and(np.equal(caObj.calipso.nsidc_surface_type,0),
                                        np.not_equal(caObj.calipso.igbp_surface_type,17))
        cal_subset_area = np.abs(caObj.calipso.latitude) <= 10
        cal_subset = np.logical_and(cal_subset_lat, cal_subset_area)
    elif mode == 'TROPIC_ZONE_ICE_FREE_SEA':
        cal_subset_lat = np.logical_and(np.equal(caObj.calipso.nsidc_surface_type,0),
                                        np.equal(caObj.calipso.igbp_surface_type,17))
        cal_subset_area = np.abs(caObj.calipso.latitude) <= 10
        cal_subset = np.logical_and(cal_subset_lat, cal_subset_area)
    
    elif mode == 'SUB_TROPIC_ZONE':
        cal_subset = np.logical_and((np.abs(caObj.calipso.latitude) > 10), 
                                    (np.abs(caObj.calipso.latitude) <= 45))    
    elif mode == 'SUB_TROPIC_ZONE_SNOW_FREE_LAND':
        cal_subset_lat = np.logical_and((np.abs(caObj.calipso.latitude) > 10), 
                                        (np.abs(caObj.calipso.latitude) <= 45))
        cal_subset_area = np.logical_and(np.equal(caObj.calipso.nsidc_surface_type,0),
                                         np.not_equal(caObj.calipso.igbp_surface_type,17))
        cal_subset = np.logical_and(cal_subset_lat, cal_subset_area)
    elif mode == 'SUB_TROPIC_ZONE_ICE_FREE_SEA':
        cal_subset_lat = np.logical_and((np.abs(caObj.calipso.latitude) > 10), 
                                        (np.abs(caObj.calipso.latitude) <= 45))
        cal_subset_area = np.logical_and(np.equal(caObj.calipso.nsidc_surface_type,0),
                                         np.equal(caObj.calipso.igbp_surface_type,17))
        cal_subset = np.logical_and(cal_subset_lat, cal_subset_area)
    
    elif mode == 'HIGH-LATITUDES':
        cal_subset = np.logical_and((np.abs(caObj.calipso.latitude) > 45), 
                                    (np.abs(caObj.calipso.latitude) <= 75))
    elif mode == 'HIGH-LATITUDES_SNOW_FREE_LAND':
        cal_subset_lat = np.logical_and(np.equal(caObj.calipso.nsidc_surface_type,0),
                                        np.not_equal(caObj.calipso.igbp_surface_type,17))
        cal_subset_area = np.logical_and((np.abs(caObj.calipso.latitude) > 45), 
                                         (np.abs(caObj.calipso.latitude) <= 75))
        cal_subset = np.logical_and(cal_subset_lat, cal_subset_area)
    elif mode == 'HIGH-LATITUDES_SNOW_COVER_LAND':
        cal_subset_lat = np.logical_and(
            np.logical_and(np.less(caObj.calipso.nsidc_surface_type,104),
                           np.greater(caObj.calipso.nsidc_surface_type,10)),
            np.not_equal(caObj.calipso.igbp_surface_type,17))
        cal_subset_area = np.logical_and((np.abs(caObj.calipso.latitude) > 45), (np.abs(caObj.calipso.latitude) <= 75))
        cal_subset = np.logical_and(cal_subset_lat, cal_subset_area)
    elif mode == 'HIGH-LATITUDES_ICE_FREE_SEA':
        cal_subset_lat = np.logical_and(np.equal(caObj.calipso.nsidc_surface_type,0),
                                        np.equal(caObj.calipso.igbp_surface_type,17))
        cal_subset_area = np.logical_and((np.abs(caObj.calipso.latitude) > 45), 
                                         (np.abs(caObj.calipso.latitude) <= 75))
        cal_subset = np.logical_and(cal_subset_lat, cal_subset_area)
    elif mode == 'HIGH-LATITUDES_ICE_COVER_SEA':
        cal_subset_lat = np.logical_and(
            np.logical_and(np.less_equal(caObj.calipso.nsidc_surface_type,100),
                           np.greater(caObj.calipso.nsidc_surface_type,10)),
            np.equal(caObj.calipso.igbp_surface_type,17))
        cal_subset_area = np.logical_and((np.abs(caObj.calipso.latitude) > 45), 
                                         (np.abs(caObj.calipso.latitude) <= 75))
        cal_subset = np.logical_and(cal_subset_lat, cal_subset_area)
    
    
    elif mode == 'POLAR':
        cal_subset = np.abs(caObj.calipso.latitude) > 75
    elif mode == 'POLAR_SNOW_FREE_LAND':
        cal_subset_lat = np.logical_and(np.equal(caObj.calipso.nsidc_surface_type,0),
                                        np.not_equal(caObj.calipso.igbp_surface_type,17))
        cal_subset_area = np.abs(caObj.calipso.latitude) > 75
        cal_subset = np.logical_and(cal_subset_lat, cal_subset_area)
    elif mode == 'POLAR_SNOW_COVER_LAND':
        cal_subset_lat = np.logical_and(
            np.logical_and(np.less(caObj.calipso.nsidc_surface_type,104),
                           np.greater(caObj.calipso.nsidc_surface_type,10)),
            np.not_equal(caObj.calipso.igbp_surface_type,17))
        cal_subset_area = np.abs(caObj.calipso.latitude) > 75
        cal_subset = np.logical_and(cal_subset_lat, cal_subset_area)
    elif mode == 'POLAR_ICE_FREE_SEA':
        cal_subset_lat = np.logical_and(
            np.equal(caObj.calipso.nsidc_surface_type,0),np.equal(caObj.calipso.igbp_surface_type,17))
        cal_subset_area = np.abs(caObj.calipso.latitude) > 75
        cal_subset = np.logical_and(cal_subset_lat, cal_subset_area)
    elif mode == 'POLAR_ICE_COVER_SEA':
        cal_subset_lat = np.logical_and(
            np.logical_and(np.less_equal(caObj.calipso.nsidc_surface_type,100),
                           np.greater(caObj.calipso.nsidc_surface_type,10)),
            np.equal(caObj.calipso.igbp_surface_type,17))
        cal_subset_area = np.abs(caObj.calipso.latitude) > 75
        cal_subset = np.logical_and(cal_subset_lat, cal_subset_area)
    
    elif mode in ('BASIC', 'OPTICAL_DEPTH', 'STANDARD',
                  'OPTICAL_DEPTH_THIN_IS_CLEAR'):
        cal_subset = np.bool_(np.ones(caObj.calipso.igbp_surface_type.shape))
    else:
        print('The mode %s is not included in the statistics file' % mode)
        sys.exit()
    return cal_subset
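# Usage sketch (hedged): 'caObj' is assumed to be a matchup object whose
# .calipso member exposes nsidc_surface_type, igbp_surface_type and latitude
# arrays, as the branches above imply.
# mask = get_subset_for_mode(caObj, 'TROPIC_ZONE_ICE_FREE_SEA')
# tropical_sea_lat = caObj.calipso.latitude[mask]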
Exemple #48
0
    def _numpy(self, data, weights, shape):
        q = self.quantity(data)
        self._checkNPQuantity(q, shape)
        self._checkNPWeights(weights, shape)
        weights = self._makeNPWeights(weights, shape)
        newentries = weights.sum()

        import numpy

        selection = numpy.isnan(q)
        numpy.bitwise_not(selection, selection)
        subweights = weights.copy()
        subweights[selection] = 0.0
        self.nanflow._numpy(data, subweights, shape)

        # avoid nan warning in calculations by flinging the nans elsewhere
        numpy.bitwise_not(selection, selection)
        q = numpy.array(q, dtype=numpy.float64)
        q[selection] = self.high
        weights = weights.copy()
        weights[selection] = 0.0

        numpy.greater_equal(q, self.low, selection)
        subweights[:] = weights
        subweights[selection] = 0.0
        self.underflow._numpy(data, subweights, shape)

        numpy.less(q, self.high, selection)
        subweights[:] = weights
        subweights[selection] = 0.0
        self.overflow._numpy(data, subweights, shape)

        if all(
                isinstance(value, Count) and value.transform is identity
                for value in self.values) and numpy.all(
                    numpy.isfinite(q)) and numpy.all(numpy.isfinite(weights)):
            # Numpy defines histograms as including the upper edge of the last bin only, so drop that
            weights[q == self.high] = 0.0

            h, _ = numpy.histogram(q,
                                   self.num, (self.low, self.high),
                                   weights=weights)

            for hi, value in zip(h, self.values):
                value.fill(None, float(hi))

        else:
            q = numpy.array(q, dtype=numpy.float64)
            numpy.subtract(q, self.low, q)
            numpy.multiply(q, self.num, q)
            numpy.divide(q, self.high - self.low, q)
            numpy.floor(q, q)
            q = numpy.array(q, dtype=int)

            for index, value in enumerate(self.values):
                numpy.not_equal(q, index, selection)
                subweights[:] = weights
                subweights[selection] = 0.0
                value._numpy(data, subweights, shape)

        # no possibility of exception from here on out (for rollback)
        self.entries += float(newentries)
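# A minimal standalone sketch (not part of the class above) of the bin-index
# arithmetic used in the fallback branch: q is mapped to
# floor((q - low) * num / (high - low)), so bin i covers the half-open
# interval [low + i*(high - low)/num, low + (i + 1)*(high - low)/num).
import numpy as np

low, high, num = 0.0, 10.0, 5
q = np.array([0.5, 2.0, 9.9, 10.0])
index = np.floor((q - low) * num / (high - low)).astype(int)
print(index)  # [0 1 4 5] -- q == high lands one past the last bin, which is
              # why the histogram fast path zeroes the weights at q == high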
Exemple #49
0
    def __init__(
        self,
        params,
        lr: float = required,
        beta: float = 0.8,
        step_size: int = None,
        linear: bool = False,
        gamma: float = 1,
        momentum: float = 0,
        dampening: float = 0,
        weight_decay: float = 0,
        nesterov: bool = False,
    ):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError(
                "Invalid weight_decay value: {}".format(weight_decay))
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError(
                "Nesterov momentum requires a momentum and zero dampening")
        params = list(params)

        # Specific stuff (not SGD)
        if np.less(beta, 0) or np.greater_equal(beta, 1):
            raise ValueError(f'Invalid beta: {beta}')
        if np.less(gamma, 0):
            raise ValueError(f'Invalid gamma: {gamma}')
        if step_size is not None:
            if np.less_equal(step_size, 0):
                raise ValueError(f'Invalid step_size: {step_size}')
        self.step_size = step_size
        self.gamma = gamma
        self.beta = beta
        self.metrics = metrics = Metrics(params=params, linear=linear)
        self.lr_vector = np.repeat(a=lr, repeats=len(metrics.params))
        self.velocity = np.ones(
            len(self.metrics.params) - len(self.metrics.mask)) * lr
        self.not_ready = list(range(len(self.velocity)))
        self.init_lr = lr
        self.zeta = 1.
        self.KG = 0.
        self.epoch = 0
        defaults = dict(lr=lr,
                        momentum=momentum,
                        dampening=dampening,
                        weight_decay=weight_decay,
                        nesterov=nesterov,
                        KG=self.KG,
                        step_size=step_size,
                        gamma=gamma,
                        beta=beta,
                        metrics=self.metrics,
                        lr_vector=self.lr_vector,
                        velocity=self.velocity,
                        not_ready=self.not_ready,
                        init_lr=self.init_lr,
                        zeta=self.zeta)
        super(RMSGD, self).__init__(params, defaults)
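# Hypothetical construction sketch: 'model' is a placeholder torch module, and
# Metrics/required come from this optimizer's own package, as assumed above.
# optimizer = RMSGD(model.parameters(), lr=0.01, beta=0.8, step_size=25,
#                   momentum=0.9)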
Exemple #50
0
def main(inimg):

    basedir = os.path.dirname(inimg)

    if not os.path.exists(inimg):
        print('File %s does not exist' % (inimg))
        sys.exit(0)
    if not os.path.exists(basedir):
        print('Directory %s does not exist' % (basedir))
        sys.exit(0)

    outimg = basedir + os.path.sep + os.path.splitext(
        os.path.basename(inimg))[0] + '_masked.tif'

    ## inimg = '../AscendingDescending/20200323_to_20200330/descending/L15-1866E-0911N.tif'
    inDS = gdal.Open(inimg, gdal.GA_ReadOnly)
    ingt = inDS.GetGeoTransform()
    inproj = inDS.GetProjection()
    xsize = inDS.RasterXSize
    ysize = inDS.RasterYSize
    ulx = ingt[0]
    uly = ingt[3]
    lrx = ingt[0] + (xsize * ingt[1])
    lry = ingt[3] + (ysize * ingt[5])

    ## clavgimg = '/scratch/dknapp4/GBR/Clouds/landsat8_mean.vrt'
    clavgimg = '/data/gdcsdata/Research/Researcher/Knapp/Hawaii_Weekly/Landsat_clean/hawaii_l8_mean.vrt'
    claDS = gdal.Open(clavgimg, gdal.GA_ReadOnly)
    clagt = claDS.GetGeoTransform()
    xsizecla = claDS.RasterXSize
    ysizecla = claDS.RasterYSize
    clagt = claDS.GetGeoTransform()

    ## clstdimg = '/scratch/dknapp4/GBR/Clouds/landsat8_stdev.vrt'
    clstdimg = '/data/gdcsdata/Research/Researcher/Knapp/Hawaii_Weekly/Landsat_clean/hawaii_l8_stdev.vrt'
    clsDS = gdal.Open(clstdimg, gdal.GA_ReadOnly)
    clsgt = clsDS.GetGeoTransform()
    xsizecls = clsDS.RasterXSize
    ysizecls = clsDS.RasterYSize
    clsgt = clsDS.GetGeoTransform()

    claDS, clsDS = None, None

    randit = '%06d' % np.random.randint(0, 999999)
    l8avg = 'temp_avg_' + randit + '.tif'
    l8std = 'temp_std_' + randit + '.tif'

    mylist1 = [clavgimg, clstdimg]
    mylist2 = [l8avg, l8std]

    for j in [0, 1]:
        commdove1 = 'gdalwarp -of GTiff -r near -te '
        commdove2 = '%12.2f %12.2f %12.2f %12.2f -tr %6.2f %6.2f %s %s' % (
            ulx, lry, lrx, uly, clagt[1], clagt[1], mylist1[j], mylist2[j])
        myargs = (commdove1 + commdove2).split()
        complete = subprocess.run(myargs, check=True)

    outfile = os.path.splitext(
        os.path.basename(inimg))[0] + '_' + randit + '_l8_sub.tif'
    commdove1 = 'gdalwarp -of GTiff -r average -te '
    commdove2 = '%12.2f %12.2f %12.2f %12.2f -tr %6.2f %6.2f %s %s' % (
        ulx, lry, lrx, uly, clagt[1], clagt[1], inimg, outfile)
    myargs = (commdove1 + commdove2).split()
    complete = subprocess.run(myargs, check=True)

    clearavgDS = gdal.Open(l8avg, gdal.GA_ReadOnly)
    clearavg1 = clearavgDS.GetRasterBand(1)
    clearavg2 = clearavgDS.GetRasterBand(2)
    clearavg3 = clearavgDS.GetRasterBand(3)

    clearstdDS = gdal.Open(l8std, gdal.GA_ReadOnly)
    clearstd1 = clearstdDS.GetRasterBand(1)
    clearstd2 = clearstdDS.GetRasterBand(2)
    clearstd3 = clearstdDS.GetRasterBand(3)

    pDS = gdal.Open(outfile, gdal.GA_ReadOnly)
    planet1 = pDS.GetRasterBand(1)
    planet2 = pDS.GetRasterBand(2)
    planet3 = pDS.GetRasterBand(3)

    drv = gdal.GetDriverByName('GTiff')

    smmask = np.zeros((pDS.RasterYSize, pDS.RasterXSize), dtype=bool)

    for i in range(pDS.RasterYSize):
        line1 = planet1.ReadAsArray(0, i, pDS.RasterXSize, 1)
        line2 = planet2.ReadAsArray(0, i, pDS.RasterXSize, 1)
        line3 = planet3.ReadAsArray(0, i, pDS.RasterXSize, 1)
        plines = np.stack((line1.squeeze(), line2.squeeze(), line3.squeeze()))
        pavg = np.mean(plines, axis=0)
        pgood = np.greater(np.sum(plines, axis=0), 0)

        clearline1 = clearavg1.ReadAsArray(0, i, clearavgDS.RasterXSize, 1)
        clearline2 = clearavg2.ReadAsArray(0, i, clearavgDS.RasterXSize, 1)
        clearline3 = clearavg3.ReadAsArray(0, i, clearavgDS.RasterXSize, 1)
        llines = np.stack(
            (clearline1.squeeze(), clearline2.squeeze(), clearline3.squeeze()))
        lavg = np.mean(llines, axis=0)
        lgood = np.greater(np.sum(llines, axis=0), 0)

        clstdline1 = clearstd1.ReadAsArray(0, i, clearavgDS.RasterXSize, 1)
        clstdline2 = clearstd2.ReadAsArray(0, i, clearavgDS.RasterXSize, 1)
        clstdline3 = clearstd3.ReadAsArray(0, i, clearavgDS.RasterXSize, 1)
        slines = np.stack(
            (clstdline1.squeeze(), clstdline2.squeeze(), clstdline3.squeeze()))
        lstd = np.mean(slines, axis=0)

        good = np.logical_and(pgood, lgood)

        sqdiff1 = np.power(plines[0, good] - llines[0, good], 2)
        sqdiff2 = np.power(plines[1, good] - llines[1, good], 2)
        sqdiff3 = np.power(plines[2, good] - llines[2, good], 2)

        dist = np.sqrt(sqdiff1 + sqdiff2 + sqdiff3)
        out = np.zeros(pDS.RasterXSize, dtype=np.float32)
        out[good] = dist
        notdark = np.less((lavg[good] - (lstd[good] * 0.5)), pavg[good])
        mask = np.logical_and(np.less(out[good], 800.0), notdark)
        smmask[i, good] = mask.astype(np.uint8)

    pDS, clearavgDS, clearstdDS = None, None, None

    os.remove(outfile)
    os.remove(l8avg)
    os.remove(l8std)

    ## Mask the Dove image

    ## inimg = '../AscendingDescending/20200323_to_20200330/descending/L15-1866E-0911N.tif'
    inDS = gdal.Open(inimg, gdal.GA_ReadOnly)
    ingt = inDS.GetGeoTransform()
    inproj = inDS.GetProjection()
    xsize = inDS.RasterXSize
    ysize = inDS.RasterYSize
    ulx = ingt[0]
    uly = ingt[3]

    ## bigmask = congrid(smmask, [4096, 4096], method='nearest', centre=True, minusone=True)
    bigmask = resize(smmask, (inDS.RasterYSize, inDS.RasterXSize))
    bigmask = bigmask.astype(bool)
    mydisk = disk(15, dtype=bool)
    berode = binary_erosion(bigmask, mydisk).astype(np.uint8)

    mDS = drv.Create(outimg,
                     inDS.RasterXSize,
                     inDS.RasterYSize,
                     inDS.RasterCount,
                     eType=inDS.GetRasterBand(1).DataType,
                     options=['COMPRESS=LZW'])
    mDS.SetGeoTransform(ingt)
    mDS.SetProjection(inproj)

    for b in range(inDS.RasterCount):
        data = inDS.GetRasterBand(b + 1).ReadAsArray()
        mask = np.equal(berode, 0)
        data[mask] = 0
        mDS.GetRasterBand(b + 1).WriteArray(data)

    inDS, mDS = None, None
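# Worked sketch of the per-pixel test above (illustrative numbers, not values
# from the source): with clear Landsat mean lavg = 500, std lstd = 100 and
# Dove mean pavg = 480, 'notdark' holds because 500 - 0.5*100 = 450 < 480;
# the pixel is then kept only if its 3-band Euclidean distance to the clear
# mean is also below the 800 threshold.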
Exemple #51
0
def comparewithnumber(a, bound):
    return np.less(a, bound)
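# Quick check of the wrapper above:
# print(comparewithnumber(np.array([1, 5, 9]), 5))  # -> [ True False False]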
def extractHarmSpec(inputFile='/home/georgid/Documents/iKala/Wavfile/mono/31112_chorus.wav', window='blackman', M=601, N=1024, t=-100, 
	minSineDur=0.1, nH=100, minf0=350, maxf0=700, f0et=5, harmDevSlope=0.01):
	"""
	Perform analysis/synthesis using the harmonic plus residual model
	inputFile: input sound file (monophonic with sampling rate of 44100)
	window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)	
	M: analysis window size; N: fft size (power of two, bigger or equal than M)
	t: magnitude threshold of spectral peaks; minSineDur: minimum duration of sinusoidal tracks
	nH: maximum number of harmonics; minf0: minimum fundamental frequency in sound
	maxf0: maximum fundamental frequency in sound; f0et: maximum error accepted in f0 detection algorithm                                                                                            
	harmDevSlope: allowed deviation of harmonic tracks, higher harmonics have higher allowed deviation
	"""

	# size of fft used in synthesis
	Ns = 512

	# hop size (has to be 1/4 of Ns)
	H = 128

	# read input sound
	(fs, x) = UF.wavread(inputFile)

	# compute analysis window
	w = get_window(window, M)

	# find harmonics and residual
# 	HPR.hprModel(x, fs, w, N, t, nH, minf0, maxf0, f0et)
	hfreq, hmag, hphase, xr = HPR.hprModelAnal(x, fs, w, N, H, t, minSineDur, nH, minf0, maxf0, f0et, harmDevSlope)

	# compute spectrogram of residual
	mXr, pXr = STFT.stftAnal(xr, fs, w, N, H)
	  
	# synthesize hpr model
	y, yh = HPR.hprModelSynth(hfreq, hmag, hphase, xr, Ns, H, fs)

	# output sound file (monophonic with sampling rate of 44100)
	outputFileSines = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hprModel_sines.wav'
	outputFileResidual = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hprModel_residual.wav'
	outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hprModel.wav'

	# write sounds files for harmonics, residual, and the sum
	UF.wavwrite(yh, fs, outputFileSines)
	UF.wavwrite(xr, fs, outputFileResidual)
	UF.wavwrite(y, fs, outputFile)

	# create figure to plot
	plt.figure(figsize=(12, 9))

	# frequency range to plot
	maxplotfreq = 5000.0

	# plot the input sound
	plt.subplot(3,1,1)
	plt.plot(np.arange(x.size)/float(fs), x)
	plt.axis([0, x.size/float(fs), min(x), max(x)])
	plt.ylabel('amplitude')
	plt.xlabel('time (sec)')
	plt.title('input sound: x')

	# plot the magnitude spectrogram of residual
	plt.subplot(3,1,2)
	maxplotbin = int(N*maxplotfreq/fs)
	numFrames = int(mXr[:,0].size)
	frmTime = H*np.arange(numFrames)/float(fs)                       
	binFreq = np.arange(maxplotbin+1)*float(fs)/N                         
	plt.pcolormesh(frmTime, binFreq, np.transpose(mXr[:,:maxplotbin+1]))
	plt.autoscale(tight=True)

	# plot harmonic frequencies on residual spectrogram
	if (hfreq.shape[1] > 0):
		harms = hfreq*np.less(hfreq,maxplotfreq)
		harms[harms==0] = np.nan
		numFrames = int(harms[:,0].size)
		frmTime = H*np.arange(numFrames)/float(fs) 
		plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
		plt.xlabel('time(s)')
		plt.ylabel('frequency(Hz)')
		plt.autoscale(tight=True)
		plt.title('harmonics + residual spectrogram')

	# plot the output sound
	plt.subplot(3,1,3)
	plt.plot(np.arange(y.size)/float(fs), y)
	plt.axis([0, y.size/float(fs), min(y), max(y)])
	plt.ylabel('amplitude')
	plt.xlabel('time (sec)')
	plt.title('output sound: y')

	plt.tight_layout()
	plt.show()
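# Hypothetical invocation: the default iKala path above will not exist on most
# systems, so 'voice.wav' is a placeholder for any mono 44100 Hz file.
# extractHarmSpec(inputFile='voice.wav', minf0=350, maxf0=700)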
Exemple #53
0
def polygonize_thresholds(raster_file_name,
                          threshold_min=0.0,
                          threshold_max=float('inf')):
    """
    Function to polygonize raster. Areas (pixels) with threshold_min <
    pixel_values < threshold_max will be converted to polygons.

    :param raster_file_name:  Raster file name
    :type raster_file_name: string

    :param threshold_min: Value that splits raster to
                    flooded or not flooded.
    :type threshold_min: float

    :param threshold_max: Value that splits raster to
                    flooded or not flooded.
    :type threshold_max: float

    :returns:   Polygon shape file name
    :rtype:     string

    """

    # all values that are in the threshold are set to 1, others are set to 0
    base_name = unique_filename()
    outfile = base_name + '.tif'

    indataset = gdal.Open(raster_file_name, gdal.GA_ReadOnly)
    out_driver = gdal.GetDriverByName('GTiff')
    outdataset = out_driver.Create(outfile, indataset.RasterXSize,
                                   indataset.RasterYSize,
                                   indataset.RasterCount, gdal.GDT_Byte)

    gt = indataset.GetGeoTransform()
    if gt is not None and gt != (0.0, 1.0, 0.0, 0.0, 0.0, 1.0):
        outdataset.SetGeoTransform(gt)
    prj = indataset.GetProjectionRef()
    if prj is not None and len(prj) > 0:
        outdataset.SetProjection(prj)

    outNoData = 1
    for iBand in range(1, indataset.RasterCount + 1):
        inband = indataset.GetRasterBand(iBand)
        outband = outdataset.GetRasterBand(iBand)

        for i in range(inband.YSize - 1, -1, -1):
            scanline = inband.ReadAsArray(0, i, inband.XSize, 1, inband.XSize,
                                          1)

            if threshold_min >= 0:
                scanline = \
                    numpy.choose(numpy.less(scanline, float(threshold_min)),
                                 (scanline, 0))

            if threshold_max > 0 and threshold_max > threshold_min:
                scanline = \
                    numpy.choose(numpy.greater(scanline, float(threshold_max)),
                                 (scanline, 0))

            scanline = numpy.choose(numpy.not_equal(scanline, 0),
                                    (scanline, outNoData))
            outband.WriteArray(scanline, 0, i)

    # polygonize
    spat_ref = osr.SpatialReference()
    proj = indataset.GetProjectionRef()
    spat_ref.ImportFromWkt(proj)
    drv = ogr.GetDriverByName("ESRI Shapefile")
    base_name = unique_filename()
    out_shape_file = base_name + ".shp"

    dst_ds = drv.CreateDataSource(out_shape_file)
    # ogr_layer_name = 'polygonized'
    ogr_layer_name = os.path.splitext(os.path.split(out_shape_file)[1])[0]
    dst_layer = dst_ds.CreateLayer(ogr_layer_name, spat_ref)
    # fd = ogr.FieldDefn("DN", ogr.OFTInteger )
    fd = ogr.FieldDefn("DN", ogr.OFTReal)
    dst_layer.CreateField(fd)
    dst_field = 0

    # gdal.Polygonize(
    #     outband, outband, dst_layer, dst_field, [], callback=None)
    gdal.Polygonize(outband, None, dst_layer, dst_field, [], callback=None)

    # produce in and out polygon layers
    base_name = unique_filename()
    inside_shape_file = base_name + "_inside.shp"
    inside_layer_name = \
        os.path.splitext(os.path.split(inside_shape_file)[1])[0]

    outside_shape_file = base_name + "_outside.shp"
    outside_layer_name = \
        os.path.splitext(os.path.split(outside_shape_file)[1])[0]

    inside_ds = drv.CreateDataSource(inside_shape_file)
    inside_layer = inside_ds.CreateLayer(inside_layer_name, spat_ref)

    outside_ds = drv.CreateDataSource(outside_shape_file)
    outside_layer = outside_ds.CreateLayer(outside_layer_name, spat_ref)

    for feature in dst_layer:
        value = feature.GetField("DN")
        geom = feature.GetGeometryRef()
        if value == 1:
            new_feature = ogr.Feature(inside_layer.GetLayerDefn())
            new_feature.SetGeometry(geom)
            inside_layer.CreateFeature(new_feature)
        else:
            new_feature = ogr.Feature(outside_layer.GetLayerDefn())
            new_feature.SetGeometry(geom)
            outside_layer.CreateFeature(new_feature)

    inside_ds.Destroy()
    outside_ds.Destroy()
    dst_ds.Destroy()
    return (inside_shape_file, inside_layer_name, outside_shape_file,
            outside_layer_name)
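# Usage sketch ('flood_depth.tif' is a placeholder single-band raster): pixels
# with threshold_min < value < threshold_max end up in the "inside" layer.
# inside_shp, inside_name, outside_shp, outside_name = \
#     polygonize_thresholds('flood_depth.tif', threshold_min=1.0)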
Exemple #54
0
def project_images(data_path, image_names, mesh_vertices, args, model2d_fixed,
                   model2d_trainable, projection, maskrcnn_model, scene_id):
    data_dict = torch.load("/home/davech2y/adl/lib/data_2d/" + scene_id + ".dictt")
    depth_images = data_dict["depth"]
    camera_poses = data_dict["camera"]
    depth_images = torch.stack(depth_images)
    camera_poses = torch.stack(camera_poses)
    # boundingmin, boundingmax, world_to_camera = projection.compute_projection_multi(camera_poses)
    world_to_camera = projection.compute_projection_multi(camera_poses)
    lin_ind_volume = np.arange(0, mesh_vertices.shape[0], dtype=np.int64)
    N = mesh_vertices.shape[0]
    mesh_vertices_original = mesh_vertices.copy()
    mesh_vertices_original = np.transpose(mesh_vertices_original)
    mesh_vertices_original = torch.from_numpy(
        mesh_vertices_original).float().to(device).cuda()
    mesh_vertices = mesh_vertices[:, 0:4]
    mesh_vertices = np.transpose(mesh_vertices)
    # boundingmax = boundingmax.cpu().numpy()
    # boundingmin = boundingmin.cpu().numpy()
    world_to_camera = world_to_camera.cuda()
    mesh_vertices = np.expand_dims(mesh_vertices, 0)
    mesh_vertices = np.repeat(mesh_vertices, len(image_names), axis=0)
    # mask_frustum_bounds = np.greater_equal(mesh_vertices[:,0], np.expand_dims(boundingmin[:,0],1)) * np.greater_equal(mesh_vertices[:,1], np.expand_dims(boundingmin[:,1],1)) * np.greater_equal(mesh_vertices[:,2], np.expand_dims(boundingmin[:,2],1))
    # mask_frustum_bounds = mask_frustum_bounds * np.less(mesh_vertices[:,0], np.expand_dims(boundingmax[:,0],1)) * np.less(mesh_vertices[:,1], np.expand_dims(boundingmax[:,1],1)) * np.less(mesh_vertices[:,2], np.expand_dims(boundingmax[:,2],1))
    # if not mask_frustum_bounds.any():
    # print('error: nothing in frustum bounds')
    #     return None
    lin_ind_volume = np.expand_dims(lin_ind_volume, 0)
    lin_ind_volume = np.repeat(lin_ind_volume, len(image_names), axis=0)
    # lin_ind_volume = lin_ind_volume[mask_frustum_bounds]
    world_to_camera = world_to_camera.cpu().numpy()
    mesh_vertices = np.matmul(world_to_camera, mesh_vertices)
    # mesh_vertices = torch.bmm(world_to_camera, mesh_vertices)
    # transform to current frame
    mesh_vertices = np.moveaxis(mesh_vertices, 0, -2)
    # mesh_vertices = mesh_vertices.permute(1, 0, 2)
    # p = p[:,mask_frustum_bounds]
    # project into image
    mesh_vertices[0] = (mesh_vertices[0] * projection.intrinsic[0][0]
                        ) / mesh_vertices[2] + projection.intrinsic[0][2]
    mesh_vertices[1] = (mesh_vertices[1] * projection.intrinsic[1][1]
                        ) / mesh_vertices[2] + projection.intrinsic[1][2]
    pi = np.around(mesh_vertices).astype(int)
    valid_ind_mask = np.greater_equal(pi[0, :], 0) * np.greater_equal(
        pi[1, :], 0) * np.less(pi[0, :], proj_image_dims[0]) * np.less(
            pi[1, :], proj_image_dims[1])
    if not valid_ind_mask.any():
        print('error: no valid image indices')
        return None
    pi = pi * valid_ind_mask.astype(int)
    image_ind_x = pi[0, :]
    image_ind_y = pi[1, :]
    image_ind_lin = image_ind_y * proj_image_dims[0] + image_ind_x
    depth = depth_images.detach().cpu().numpy()
    depth_vals = np.concatenate([
        np.expand_dims(np.take(a.reshape(-1), i, 0), 0)
        for a, i in zip(depth, image_ind_lin)
    ])
    depth_mask = np.greater_equal(depth_vals, args.depth_min) * np.less_equal(
        depth_vals, args.depth_max) * np.less_equal(
            np.absolute(depth_vals - mesh_vertices[2] *
                        valid_ind_mask.astype(float)), args.voxel_size)

    if not depth_mask.any():
        # print('error: no valid depths')
        return None
    final_mask = (valid_ind_mask * depth_mask)
    lin_indices_3d = [a[i] for a, i in zip(lin_ind_volume, final_mask)]
    lin_indices_2d = [
        np.take(a,
                np.argwhere(i)[:, 0], 0)
        for a, i in zip(image_ind_lin, final_mask)
    ]
    # use enet features
    # features_to_add = get_features_enet_multi([os.path.join(data_path, 'color', image_name+".jpg") for image_name in image_names], device, model2d_fixed, model2d_trainable)
    # use mask r cnn features
    features_to_add = get_features_for_projection_multi([
        os.path.join(data_path, 'color', image_name + ".jpg")
        for image_name in image_names
    ], device, maskrcnn_model, scene_id, args)
    num_label_ft = 1 if len(
        features_to_add[0].shape) == 2 else features_to_add[0].shape[0]
    output = features_to_add[0].new(4 + num_label_ft, N).fill_(0)
    output[0:4, :] = mesh_vertices_original[0:4, :]
    # output = output.detach().cpu().numpy()
    #    num_ind = lin_indices_3d[0]
    #    if num_ind > 0:
    # features_to_add = [feature.cpu().numpy() for feature in features_to_add]
    for feature, lin_index_2d, lin_index_3d in zip(features_to_add,
                                                   lin_indices_2d,
                                                   lin_indices_3d):
        # vals = np.take(feature.reshape(num_label_ft, -1), lin_index_2d, 1)
        # output.reshape(num_label_ft+4, -1)[4:, lin_index_3d] = vals
        lin_index_2d = torch.tensor(lin_index_2d).cuda()
        lin_index_3d = torch.tensor(lin_index_3d).cuda()
        vals = torch.index_select(feature.view(num_label_ft, -1), 1,
                                  lin_index_2d)
        output.view(num_label_ft + 4, -1)[4:, lin_index_3d] = vals
    output = torch.transpose(output, 0, 1)
    return output
Exemple #55
0
def get_interval(timedata, t0, t1):
    mask = np.logical_and(np.greater_equal(timedata, t0),
                          np.less(timedata, t1))
    return timedata[mask]
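# Quick check of the half-open interval semantics [t0, t1) (assumes numpy
# imported as np, as in the function above):
_t = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
print(get_interval(_t, 1.0, 3.0))  # -> [1. 2.] (t0 included, t1 excluded)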
def recover_or_die(population, frame, Config):
    '''see whether to recover or die


    Keyword arguments
    -----------------
    population : ndarray
        array containing all data on the population

    frame : int
        the current timestep of the simulation

    recovery_duration : tuple
        lower and upper bounds of duration of recovery, in simulation steps

    mortality_chance : float
        the odds that someone dies instead of recovers (between 0 and 1)

    risk_age : int or float
        the age from which mortality risk starts increasing

    critical_age: int or float
        the age where mortality risk equals critical_mortality_chance

    critical_mortality_chance : float
        the heightened odds that an infected person has a fatal ending

    risk_increase : string
        can be 'quadratic' or 'linear', determines whether the mortality risk
        between the at risk age and the critical age increases linearly or
        quadratically

    no_treatment_factor : int or float
        defines a change in mortality odds if someone cannot get treatment. Can
        be larger than one to increase risk, or lower to decrease it.

    treatment_dependent_risk : bool
        whether availability of treatment influences patient risk

    treatment_factor : int or float
        defines a change in mortality odds if someone is in treatment. Can
        be larger than one to increase risk, or lower to decrease it.

    verbose : bool
        whether to report to terminal the recoveries and deaths for each simulation step
    '''

    #find infected people
    infected_people = population[population[:,6] == 1]

    #define vector of how long everyone has been sick
    illness_duration_vector = frame - infected_people[:,8]

    #update severity
    can_progress = np.less(infected_people[:,15], infected_people[:,18])
    is_progression_step = np.sum(np.tile(illness_duration_vector, (3,1)).T == Config.infection_progression_duration, axis=1) == 1
    to_progress = np.logical_and(is_progression_step, can_progress)
    infected_people[:,15][to_progress] += 1
    
    recovery_odds_vector = (illness_duration_vector - Config.recovery_duration[0]) / np.ptp(Config.recovery_duration)
    recovery_odds_vector = np.clip(recovery_odds_vector, a_min = 0, a_max = None)

    #update states of sick people 
    indices = infected_people[:,0][recovery_odds_vector >= infected_people[:,9]]

    recovered = []
    fatalities = []

    #decide whether to die or recover
    for idx in indices:
        #check if we want risk to be age dependent
        #if age_dependent_risk:
        if Config.age_dependent_risk:
            updated_mortality_chance = compute_mortality(infected_people[infected_people[:,0] == idx][:,7][0], 
                                                            Config.mortality_chance,
                                                            Config.risk_age, Config.critical_age, 
                                                            Config.critical_mortality_chance, 
                                                            Config.risk_increase)
        else:
            updated_mortality_chance = Config.mortality_chance

        if infected_people[infected_people[:,0] == int(idx)][:,10] == 0 and Config.treatment_dependent_risk:
            #if person is not in treatment, increase risk by no_treatment_factor
            updated_mortality_chance = updated_mortality_chance * Config.no_treatment_factor
        elif infected_people[infected_people[:,0] == int(idx)][:,10] == 1 and Config.treatment_dependent_risk:
            #if person is in treatment, decrease risk by 
            updated_mortality_chance = updated_mortality_chance * Config.treatment_factor
        
        # if symptoms are not severe, don't die
        if infected_people[infected_people[:,0] == int(idx)][:,15] != 2:
            updated_mortality_chance = 0

        if np.random.random() <= updated_mortality_chance:
            #die
            infected_people[:,6][infected_people[:,0] == idx] = 3
            infected_people[:,10][infected_people[:,0] == idx] = 0
            fatalities.append(np.int32(infected_people[infected_people[:,0] == idx][:,0][0]))
        else:
            #recover (become immune)
            infected_people[:,6][infected_people[:,0] == idx] = 2
            infected_people[:,10][infected_people[:,0] == idx] = 0
            recovered.append(np.int32(infected_people[infected_people[:,0] == idx][:,0][0]))

    if len(fatalities) > 0 and Config.verbose:
        print('\nat timestep %i these people died: %s' %(frame, fatalities))
    if len(recovered) > 0 and Config.verbose:
        print('\nat timestep %i these people recovered: %s' %(frame, recovered))

    #put array back into population
    population[population[:,6] == 1] = infected_people

    return population
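# Worked example of the recovery-odds ramp above: with an illustrative
# Config.recovery_duration of (200, 500) (not a value from the source),
# someone sick for 350 frames gets odds (350 - 200) / np.ptp((200, 500))
# = 150 / 300 = 0.5; negative values (still early in the illness) are
# clipped to 0.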
Exemple #57
0
def z_norm(a, axis=0, threshold=1e-7):
    std = np.std(a, axis, keepdims=True)
    # use out= so entries excluded by `where` are defined (False), not garbage
    small = np.less(std, threshold, out=np.zeros_like(std, dtype=bool),
                    where=~np.isnan(std))
    std[small] = 1.0

    return (a - np.mean(a, axis, keepdims=True)) / std
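# Usage sketch (assumes numpy imported as np, as in the function above):
# columns are normalized independently along axis 0, and a constant column
# falls below the std threshold, so it is divided by 1.0 instead of ~0.
_a = np.array([[1.0, 5.0], [2.0, 5.0], [3.0, 5.0]])
print(z_norm(_a))  # column 0 -> zero mean, unit std; column 1 -> all zeros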
Exemple #58
0
        # Search for the luminance contrast level at half maximum response. A
        # while loop is more practical than an analytic solution - it is easy
        # to implement and reliable because of the constrained nature of the
        # problem. The problem is constrained because the luminance contrast
        # has to be between zero and one.

        # Initial value for the contrast level (will be incremented until the
        # half maximum response is reached).
        varHlfMaxCont = 0.0

        # Initial value for the response.
        varRespTmp = 0.0

        # Increment the contrast level until the half maximum response is
        # reached:
        while np.less(varRespTmp, varResp50):
            varHlfMaxCont += 0.000001
            if strFunc == 'power':
                varRespTmp = crf_power(varHlfMaxCont, vecMdlPar[0],
                                       vecMdlPar[1])
            elif strFunc == 'hyper':
                varRespTmp = crf_hyper(varHlfMaxCont, vecMdlPar[0],
                                       vecMdlPar[1], vecMdlPar[2])
        lstHlfMaxCont[idxIn][0, idxDpth] = varHlfMaxCont

        # --------------------------------------------------------------------
        # *** Calculate residual variance

        # In order to assess the fit of the model, we calculate the deviation
        # of the measured response from the fitted model (average across
        # conditions). First we have to calculate the deviation for each
Exemple #59
0
def decodeData(inp):
    """
    Generates decoded (RGB) images from 8 channel data

    Input:
       - inp: data of shape (N, C, H, W) (range: 0 or 1)
             where C = 9 (Y) or 1 (X)

    Output:
       - out: decoded (RGB) data (N, 3, H, W) (range: 0-255)
    """

    # Define RGB colors (http://www.rapidtables.com/web/color/RGB_Color.html)
    M6_clr    = np.array([[  0.0, 191.0, 255.0]])  # layer 7  m6    dodger blue
    VIA5_clr  = np.array([[255.0,   0.0,   0.0]])  # layer 6  via5  red
    M5_clr    = np.array([[169.0, 169.0, 169.0]])  # layer 5  m5    dark gray
    VIA4_clr  = np.array([[  0.0, 255.0,   0.0]])  # layer 4  via4  green
    M4_clr    = np.array([[255.0,  99.0,  71.0]])  # layer 3  m4    tomato red
    VIA3_clr  = np.array([[  0.0,   0.0, 255.0]])  # layer 2  via3  blue
    M3_clr    = np.array([[ 50.0, 205.0,  50.0]])  # layer 1  m3    lime green
    PIN_clr   = np.array([[  0.0,   0.0,   0.0]])  # layer 0  pin   black
    BGND_clr  = np.array([[255.0, 255.0, 255.0]])  # background     white
    blockage_clr = np.array([[255.0, 215.0, 0.0]])  # layer 8 blockage  gold

    # If inp is 1 image, reshape
    if (len(inp.shape) == 3):
        C, H, W = inp.shape
        inp = inp.reshape(1, C, H, W)

    N, C, H, W = inp.shape
    D = N * H * W

    # Warn if any input elements are out of range
    if np.any(np.less(inp, 0)):
        print("Invalid input range detected (some input elements < 0)")

    # Initialize pseudo output & activepixelcount matrix
    out = np.zeros([N, 3, H, W])

    inp_swap = np.swapaxes(inp, 1, 0).reshape(C, D)   # (C, N*H*W)
    out_swap = np.swapaxes(out, 1, 0).reshape(3, D)   # (3, N*H*W)

    if C != 1:
        # layer 8 processing
        temp8 = np.broadcast_to((inp_swap[8, :] == 1), (3, D))
        out_swap += blockage_clr.T * temp8

        # layer 7 processing
        temp7 = np.broadcast_to((inp_swap[7, :] == 1), (3, D))
        out_swap += M6_clr.T * temp7

        # layer 6 processing
        temp6 = np.broadcast_to((inp_swap[6, :] == 1), (3, D))
        out_swap += VIA5_clr.T * temp6

        # layer 5 processing
        temp5 = np.broadcast_to((inp_swap[5, :] == 1), (3, D))
        out_swap += M5_clr.T * temp5

        # layer 4 processing
        temp4 = np.broadcast_to((inp_swap[4, :] == 1), (3, D))
        out_swap += VIA4_clr.T * temp4

        # layer 3 processing
        temp3 = np.broadcast_to((inp_swap[3, :] == 1), (3, D))
        out_swap += M4_clr.T * temp3

        # layer 2 processing
        temp2 = np.broadcast_to((inp_swap[2, :] == 1), (3, D))
        out_swap += VIA3_clr.T * temp2

        # layer 1 processing
        temp1 = np.broadcast_to((inp_swap[1, :] == 1), (3, D))
        out_swap += M3_clr.T * temp1

    # layer 0 processing
    temp0 = np.broadcast_to((inp_swap[0, :] == 1), (3, D))
    out_swap += PIN_clr.T * temp0

    # For every pixel, count the number of active layers that overlap
    overlap = 1.0 * np.sum(inp_swap, axis=0, keepdims=True)
    n_layers = overlap + (overlap == 0.0) * 1.0    # To avoid division by zero

    # If no active layers, fill in white
    temp_white = np.broadcast_to((overlap == 0), (3, D))
    out_swap += BGND_clr.T * temp_white

    # Average the colors for all active layers
    out_swap /= (n_layers * 1.0)

    out_swap = out_swap.reshape(3, N, H, W)
    out = np.swapaxes(out_swap, 1, 0)    # (N, 3, H, W)

    # Warn if any output elements are out of range
    if np.any(np.greater(out, 255)):
        print("Invalid output range detected (some output elements > 255)")
    if np.any(np.less(out, 0)):
        print("Invalid output range detected (some output elements < 0)")

    return out
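# Usage sketch: one 9-channel, 2x2 binary layout with a single m3 pixel and a
# single via3 pixel; every other pixel decodes to the white background.
# (Assumes numpy imported as np, as in the function above.)
_inp = np.zeros((9, 2, 2))
_inp[1, 0, 0] = 1  # m3 at (0, 0) -> lime green
_inp[2, 1, 1] = 1  # via3 at (1, 1) -> blue
print(decodeData(_inp).shape)  # -> (1, 3, 2, 2)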
Exemple #60
0
def embed_raster(input_fg,
                 input_bg,
                 output,
                 nsmooth_init=2,
                 nsmooth_final=1,
                 plot=False,
                 max_time_init=3,
                 max_time_final=1,
                 nstep=50,
                 report_interval=1,
                 **kwargs):
    """ Embed a smoother DEM in a coarser
    
        The two inputs and output are filenames. The basic plans is this:
        1. Smooth the fine data enough to resample without aliasing or distortion
        2. Interpolate/resample the result to the coarser mesh
        3. Where the result of 1-2 has good data, replace coarser data
        4. Smooth the final grid lightly to remove kinks/discontinuity
        
        The smoothing is done with contour_smooth2d
    """
    from nodepy import runge_kutta_method as rk
    from nodepy import ivp
    ds_fine = RasterWrapper(input_fg)
    cols = ds_fine.nx
    rows = ds_fine.ny
    nd = ds_fine.no_data
    dx = ds_fine.dx
    dem = ds_fine.dem
    origin = ds_fine.origin

    scales = np.arange(1, nsmooth_init + 1)

    #todo: whether this test is legit depends on context. fine for DEM
    if nd < -1e16:
        dem[dem < -1e16] = np.nan
    dem_fine = contour_smooth2d(dem, scales, max_time_init, nstep,
                                report_interval)
    x_fine = origin[0] + dx[0] * (0.5 + np.arange(cols))
    y_fine = origin[1] + dx[1] * (0.5 + np.arange(rows))
    print("Start interp")
    import scipy.interpolate as si
    fine_good = np.where(np.isnan(dem_fine), 0., 1.)

    # this filling is for the interpolator, undone later
    # the nan values will not be used to fill the bg grid
    dem_fine[np.isnan(dem_fine)] = np.nanmean(dem_fine)
    f = si.interp2d(x_fine, y_fine, dem_fine, fill_value=np.nan)
    f2 = si.interp2d(x_fine, y_fine, fine_good, fill_value=np.nan)
    fg2 = f2(x_fine, y_fine)

    print("End interp")

    ds_coarse = RasterWrapper(input_bg)

    cols = ds_coarse.nx
    rows = ds_coarse.ny
    dem_coarse = ds_coarse.dem
    nd = ds_coarse.no_data
    dx_coarse = ds_coarse.dx
    origin_coarse = ds_coarse.origin

    x_coarse = origin_coarse[0] + dx_coarse[0] * (0.5 + np.arange(cols))
    y_coarse = origin_coarse[1] + dx_coarse[1] * (0.5 + np.arange(rows))
    dem_interp = f(x_coarse, y_coarse, assume_sorted=False)
    dem_interp2 = f2(x_coarse, y_coarse, assume_sorted=False)
    dem_interp[np.less(dem_interp2, 0.99)] = np.nan

    #dem_mixed = dem_interp2
    dem_mixed = np.where(np.isnan(dem_interp[::-1, :]), dem_coarse,
                         dem_interp[::-1, :])

    scales = np.arange(1, nsmooth_final + 1)
    dem_final = contour_smooth2d(dem_mixed, scales, max_time_final, nstep,
                                 report_interval)

    if plot:
        fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(2,
                                                     2,
                                                     sharex=True,
                                                     sharey=True)

        levels = [-24, -20, -16, -8, -4, -2, -1, 0, 1, 2, 4]
        import matplotlib
        vmin = -24
        vmax = 6
        matplotlib.rcParams['contour.negative_linestyle'] = 'solid'

        ax0.imshow(dem_final,
                   vmin=vmin,
                   vmax=vmax,
                   origin='upper',
                   extent=ds_coarse.extent)
        ax0.set_title("Final 10m")
        cs0 = ax0.contour(dem_final,
                          levels,
                          origin='upper',
                          colors='k',
                          extent=ds_coarse.extent,
                          linewidths=1,
                          antialiased=True)

        #ax1.imshow(dem_interp[::-1,:],vmin=-20,vmax=6)
        ax1.set_title("Original 10m DEM")
        ax1.imshow(dem_coarse,
                   vmin=vmin,
                   vmax=vmax,
                   origin='upper',
                   extent=ds_coarse.extent)
        cs1 = ax1.contour(dem_coarse,
                          levels,
                          origin='upper',
                          colors='k',
                          extent=ds_coarse.extent,
                          linewidths=1,
                          antialiased=True)
        ax2.set_title("Smoothed 2m DEM")

        ax2.imshow(dem_fine,
                   vmin=vmin,
                   vmax=vmax,
                   origin='upper',
                   extent=ds_fine.extent)
        cs2 = ax2.contour(dem_fine,
                          levels,
                          origin='upper',
                          colors='k',
                          extent=ds_fine.extent,
                          linewidths=1,
                          antialiased=True)
        ax3.set_title("Original 2m DEM")
        ax3.imshow(dem,
                   vmin=vmin,
                   vmax=vmax,
                   origin='upper',
                   extent=ds_fine.extent)
        cs3 = ax3.contour(dem,
                          levels,
                          origin='upper',
                          colors='k',
                          extent=ds_fine.extent,
                          linewidths=1,
                          antialiased=True)

        #plt.clabel(cs1, inline=1, fontsize=10)

        plt.show()
    ds_coarse.write_copy(output, dem_final)
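# Hypothetical invocation; 'dem_2m.tif' and 'dem_10m.tif' are placeholder file
# names, not paths from the source.
# embed_raster('dem_2m.tif', 'dem_10m.tif', 'dem_embedded.tif',
#              nsmooth_init=2, nsmooth_final=1, plot=False)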