Example #1
def conv_backward_naive(dout, cache):
  """
  A naive implementation of the backward pass for a convolutional layer.

  Inputs:
  - dout: Upstream derivatives.
  - cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive

  Returns a tuple of:
  - dx: Gradient with respect to x
  - dw: Gradient with respect to w
  - db: Gradient with respect to b
  """
  x, w, b, conv_param = cache
  p = conv_param['pad']
  s = conv_param['stride']
  
  dx = np.zeros_like(x)
  N, C, H, W = x.shape
  F, C, HH, WW = w.shape
  lmin = 0
  lmax = int((H + 2*p - HH)/s)
  kmin = 0
  kmax = int((W + 2*p - WW)/s)
  for n in range(N):
      for c in range(C):
          for h in range(H):
              for g in range(W):
                  delta = 0.0
                  l0 = int(np.maximum(lmin, np.ceil((h+p-HH+1)/s)))
                  l1 = int(np.minimum(lmax, np.floor((h+p)/s)))
                  k0 = int(np.maximum(kmin,np.ceil((g+p-WW+1)/s)))
                  k1 = int(np.minimum(kmax, np.floor((g+p)/s)))
                  for l in range(l0,l1+1):
                      for k in range(k0, k1+1):
                          for f in range(F):
                              delta += w[f,c,h + p - l * s, g + p - k*s] * dout[n,f,l,k]
          
                  dx[n,c,h,g] = delta  
  
  dw = np.zeros_like(w)  
  for f in range(F):
      for c in range(C):
          for i in range(HH):
              for j in range(WW):
                  delta = 0.0
                  l0 = int(np.maximum(lmin, np.ceil((p-i)/s)))
                  l1 = int(np.minimum(lmax, np.floor((p-i+H-1)/s)))
                  k0 = int(np.maximum(kmin, np.ceil((p-j)/s)))
                  k1 = int(np.minimum(kmax, np.floor((p-j+W-1)/s)))
                  for l in range(l0, l1 + 1):
                      for k in range(k0, k1 + 1):
                          for n in range(N):
                              delta += x[n,c,-p+l*s+i, -p+k*s+j] * dout[n,f,l,k]
      
                  dw[f,c,i,j] = delta

  db = np.sum(dout, axis=(0,2,3))
  return dx, dw, db
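A standard way to validate a naive backward pass like this is a central-difference gradient check against the matching forward pass. Below is a minimal sketch; conv_forward_naive(x, w, b, conv_param) returning (out, cache) is assumed from the same exercise and is not defined here.

import numpy as np

def eval_numerical_gradient_array(f, x, df, h=1e-5):
    # Central-difference gradient of f at x, contracted with the upstream gradient df.
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index
        old = x[ix]
        x[ix] = old + h
        pos = f(x).copy()
        x[ix] = old - h
        neg = f(x).copy()
        x[ix] = old
        grad[ix] = np.sum((pos - neg) * df) / (2 * h)
        it.iternext()
    return grad

# Hypothetical check (conv_forward_naive is an assumption, not shown above):
# x = np.random.randn(2, 3, 5, 5); w = np.random.randn(4, 3, 3, 3); b = np.random.randn(4)
# conv_param = {'stride': 1, 'pad': 1}
# out, cache = conv_forward_naive(x, w, b, conv_param)
# dout = np.random.randn(*out.shape)
# dx, dw, db = conv_backward_naive(dout, cache)
# dx_num = eval_numerical_gradient_array(
#     lambda x: conv_forward_naive(x, w, b, conv_param)[0], x, dout)
# assert np.allclose(dx, dx_num, atol=1e-6)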
Example #2
    def scale(self, factor_x, factor_y=None):
        """
        Expand or contract the bounding box or contour around its center by a given factor

        :param factor_x: The multiplicative scale parameter in the x direction
        :type factor_x: float
        :param factor_y: The multiplicative scale parameter in the y direction
        :type factor_y: float

        .. note::
            if factor_y parameter is omitted, then the factor_x is used in both directions

        .. note::
            The scaling is done with respect to the contour's centroid as computed by the get_centroid
            methods.

        :Example:
        ::

            shape = (100, 100, 3)
            image = np.zeros(shape, dtype=np.uint8)
            d = bounding_region(shape, contour=np.array([[[10, 20]],
                                                         [[25, 15]],
                                                         [[80, 65]],
                                                         [[60, 70]],
                                                         [[20, 75]],
                                                         [[5, 50]]]))
            d.draw_contour(image, color=(0, 255, 0))
            # Scale to half the size
            d.scale(0.5)
            d.draw_contour(image, color=(255, 255, 0))
            d.draw_box(image)
            cv2.imshow("Two contours", image)
            cv2.waitKey(0)

        """
        if self._empty:
            return
        if factor_y is None:
            factor_y = factor_x
        if self.image_shape is None:
            raise Exception("Image shape is nescessary to compute the relative coordinates")
        if self.box_is_primary:
            shift_x = self.box[2] * (1.-factor_x) * 0.5
            shift_y = self.box[3] * (1.-factor_y) * 0.5
            self.box = np.array([np.maximum(self.box[0]+shift_x, 0),
                                 np.maximum(self.box[1]+shift_y, 0),
                                 np.minimum(self.box[2]*factor_x, self.image_shape[1]),
                                 np.minimum(self.box[3]*factor_y, self.image_shape[0])]).astype(np.int32)
            self._contour_from_box()
            self._update_internals()
        else:
            (cx, cy) = self.get_centroid_pixels()
            new_contour = np.zeros_like(self.contour, dtype=np.int32)
            for i in range(self.contour.shape[0]):
                new_contour[i][0][0] = np.clip(int(cx + (self.contour[i][0][0]-cx)*factor_x), a_min=0, a_max=self.image_shape[1])
                new_contour[i][0][1] = np.clip(int(cy + (self.contour[i][0][1]-cy)*factor_y), a_min=0, a_max=self.image_shape[0])
            self.contour = new_contour
            self._box_from_contour()
            self._update_internals()
Example #3
    def process_chunk(self, t0, t1, intensity, weights, pp_intensity, pp_weights):
        # Loop over intensity/weights in chunks of size v1_chunk
        for ichunk in range(0, self.nt_chunk, self.v1_chunk):
            for frequency in range(self.nfreq):
                # Calculate the v1 for each frequency
                self.v1_tmp[frequency] =  self._v1(intensity[frequency, ichunk:ichunk+self.v1_chunk], weights[frequency, ichunk:ichunk+self.v1_chunk])

            # Once v1s have been calculated for each frequency, update the weights and running variance
            non_zero_v1 = self.v1_tmp != 0
            zero_v1 = np.logical_not(non_zero_v1)

            # For nonzero (successful) v1s, increase the weights (if possible) and update the running variance
            self.running_weights[non_zero_v1] = np.minimum(2.0, self.running_weights[non_zero_v1] + self.w_clamp)
            self.v1_tmp[non_zero_v1] = np.minimum((1-self.var_weight) * self.running_var[non_zero_v1] + self.var_weight * self.v1_tmp[non_zero_v1], 
                                                  self.running_var[non_zero_v1] + self.var_clamp_add + self.running_var[non_zero_v1] * self.var_clamp_mult)
            self.v1_tmp[non_zero_v1] = np.maximum(self.v1_tmp[non_zero_v1], self.running_var[non_zero_v1] - self.var_clamp_add - self.running_var[non_zero_v1] * self.var_clamp_mult)
            self.running_var[non_zero_v1] = self.v1_tmp[non_zero_v1]

            # For unsuccessful v1s, decrease the weights (if possible) and do not modify the running variance 
            self.running_weights[zero_v1] = np.maximum(0, self.running_weights[zero_v1] - self.w_clamp)
            
            # Mask fill!
            intensity_valid = (weights[:, ichunk:ichunk+self.v1_chunk] > self.w_cutoff)
            rand_intensity = np.random.standard_normal(size=intensity[:, ichunk:ichunk+self.v1_chunk].shape)
            for (ifreq,v) in enumerate(self.running_var):
                if v > 0.0:
                    rand_intensity[ifreq, :] *= v**0.5
            intensity[:, ichunk:ichunk+self.v1_chunk] = np.where(intensity_valid, intensity[:, ichunk:ichunk+self.v1_chunk], rand_intensity)
            weights[:, ichunk:ichunk+self.v1_chunk] = np.repeat(self.running_weights, self.v1_chunk).reshape(self.nfreq, self.v1_chunk)
Example #4
def nms(dets, thresh):
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]

    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / ((areas[i] + areas[order[1:]] - inter)+0.0000001)

        inds = np.where(ovr <= thresh)[0]
        order = order[inds + 1]

    return keep
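A quick sanity check with hand-made detections in (x1, y1, x2, y2, score) format; the boxes below are hypothetical, and numpy is assumed to be imported as np:

dets = np.array([
    [10., 10., 50., 50., 0.9],      # highest score, kept
    [12., 12., 52., 52., 0.8],      # IoU ~0.83 with the first box, suppressed
    [100., 100., 150., 150., 0.7],  # disjoint from the first box, kept
])
print(nms(dets, thresh=0.5))  # indices 0 and 2 survive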
Example #5
    def autoRange(self,zmin=None,zmax=None,coordList=None):
        """ determine X,Y,Z limits
        """

        if coordList is None:
            coordList = self.coordList

        # autorange in X,Y
        minX = 1.e50
        maxX = -1.e50
        minY = 1.e50
        maxY = -1.e50
        minZ = 1.e50
        maxZ = -1.e50
        for iCoord in coordList:

            #if self.interpPresent:
            #    X,Y = self.interpGrids[iCoord]
            #    Z = self.interpValues[iCoord]
            #else:
            X,Y,Z = self.getXYZpoints(coordList=coordList)

            minX = numpy.minimum(minX,X.min())
            maxX = numpy.maximum(maxX,X.max())
            minY = numpy.minimum(minY,Y.min())
            maxY = numpy.maximum(maxY,Y.max())
            minZ = numpy.minimum(minZ,Z.min())
            maxZ = numpy.maximum(maxZ,Z.max())

        if zmin is not None:
            minZ = zmin
        if zmax is not None:
            maxZ = zmax

        return minX,maxX,minY,maxY,minZ,maxZ
Example #6
    def SetZoomAxes(self):

        x = self.zoompoint[0]
        y = self.zoompoint[1]
        
        W = params.params.movie.get_width()
        H = params.params.movie.get_height()
        h = H/self.zoomfactor
        w = W/self.zoomfactor
        x1 = x-w/2
        x2 = x+w/2
        y1 = y-h/2
        y2 = y+h/2

        if x1 < 0:
            x2 -= x1
            x1 = 0
        elif x2 > W-1:
            x1 -= (x2 - W + 1)
            x2 = W-1
        if y1 < 0:
            y2 -= y1
            y1 = 0
        elif y2 > H-1:
            y1 -= (y2 - H + 1)
            y2 = H-1
        x1 = num.maximum(int(x1),0)
        x2 = num.minimum(int(x2),W-1)
        y1 = num.maximum(int(y1),0)
        y2 = num.minimum(int(y2),H-1)

        self.zoomaxes = [x1,x2,y1,y2]
        self.ShowImage()
Example #7
	def ParseMDH (self, n = 0, dry = False):
		''' Parse one MDH entry '''

		self.bm = struct.unpack_from ('2I', self.buf, n +  20)
		if (self.bm[0] & SYNCDATA):
			return n + MDHSIZE
		
		if (self.bm[0] & NOISEADJSCAN):
			self.lc  = struct.unpack_from ('%dH' % 16, self.buf, n + 28) # loop counters
			self.ch  = struct.unpack_from ('H', self.buf, n + 124)
	
			if (dry):
				if (self.noisedims[0] == 0):
					self.noisedims[0] = self.lc[0]
					self.noisedims[1] = struct.unpack_from ('H', self.buf, n + 30)[0]
					self.noisencolb = self.noisedims[md.COL] * self.ndds
					self.noisedims[2:16] = np.array(self.lc[2:16])
				else:
					self.noisedims[2:16] = np.maximum (self.noisedims[2:16],self.lc[2:16])
			return n + MDHSIZE
				
		self.lc  = struct.unpack_from ('%dH' % 16, self.buf, n + 28) # loop counters
		self.ch  = struct.unpack_from ('H', self.buf, n + 124)

		if (dry):
			if (self.dims[0] == 0):
				self.dims[0] = self.lc[0]
				self.dims[1] = struct.unpack_from ('H', self.buf, n + 30)[0]
				self.ncolb = self.dims[md.COL] * self.ds
				self.dims[2:16] = np.array(self.lc[2:16])
			else:
				self.dims[2:16] = np.maximum (self.dims[2:16],self.lc[2:16])

		return n + MDHSIZE
Example #8
    def stat(self, baseline=0.0):
        """
        Return the decision statistic associated with the test of the
        null hypothesis: (H0) 'contrast equals baseline'
        """
        self._baseline = baseline

        # Case: one-dimensional contrast ==> t or t**2
        if self.dim == 1:
            # avoids division by zero
            t = (self.effect - baseline) / np.sqrt(
                np.maximum(self.variance, self._tiny))
            if self.type == 'F':
                t = t ** 2
        # Case: F contrast
        elif self.type == 'F':
            # F = |t|^2/q ,  |t|^2 = e^t v-1 e
            t = mahalanobis(self.effect - baseline, np.maximum(
                    self.variance, self._tiny)) / self.dim
        # Case: tmin (conjunctions)
        elif self.type == 'tmin':
            vdiag = self.variance.reshape([self.dim ** 2] + list(
                    self.variance.shape[2:]))[:: self.dim + 1]
            t = (self.effect - baseline) / np.sqrt(
                np.maximum(vdiag, self._tiny))
            t = t.min(0)

        # Unknown stat
        else:
            raise ValueError('Unknown statistic type')
        self._stat = t
        return t
Example #9
def _joint_probabilities(distances, desired_perplexity, verbose):
    """Compute joint probabilities p_ij from distances.

    Parameters
    ----------
    distances : array, shape (n_samples * (n_samples-1) / 2,)
        Distances of samples are stored as condensed matrices, i.e.
        we omit the diagonal and duplicate entries and store everything
        in a one-dimensional array.

    desired_perplexity : float
        Desired perplexity of the joint probability distributions.

    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    """
    # Compute conditional probabilities such that they approximately match
    # the desired perplexity
    distances = distances.astype(np.float32, copy=False)
    conditional_P = _utils._binary_search_perplexity(
        distances, None, desired_perplexity, verbose)
    P = conditional_P + conditional_P.T
    sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
    P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
    return P
Example #10
def compute_overlap(a, b):
    """
    Parameters
    ----------
    a: (N, 4) ndarray of float
    b: (K, 4) ndarray of float
    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    area = (b[:, 2] - b[:, 0] + 1) * (b[:, 3] - b[:, 1] + 1)

    iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0]) + 1
    ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1]) + 1

    iw = np.maximum(iw, 0)
    ih = np.maximum(ih, 0)

    ua = np.expand_dims((a[:, 2] - a[:, 0] + 1) * (a[:, 3] - a[:, 1] + 1), axis=1) + area - iw * ih

    ua = np.maximum(ua, np.finfo(float).eps)

    intersection = iw * ih

    return intersection / ua
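A quick check with hypothetical boxes (numpy assumed imported as np); note the +1 convention, under which a box spanning 0..9 counts as 10 pixels wide:

a = np.array([[0., 0., 9., 9.]])    # one 10x10 query box
b = np.array([[0., 0., 9., 9.],     # identical box
              [5., 5., 14., 14.]])  # 10x10 box offset by half its width
print(compute_overlap(a, b))        # approx [[1.0, 0.1429]], i.e. 25/175 for the offset pair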
Example #11
    def update_swe(self):

        #--------------------------------------------------------
        # Note: The Meteorology component uses air temperature
        # to compute P_rain (precip that falls as liquid) and
        # P_snow (precip that falls as snow or ice) separately.
        # P_snow = (self.P * (self.T_air <= 0)) 
        #----------------------------------------------------------
        # Note: This method must be written to work regardless
        # of whether P_rain and T are scalars or grids. (3/14/07)
        #------------------------------------------------------------
        # If P or T_air is a grid, then h_swe and h_snow are grids.
        # This is set up in initialize_computed_vars().
        #------------------------------------------------------------
      
        #------------------------------------------------
        # Increase snow water equivalent due to snowfall
        #------------------------------------------------
        # Meteorology and Channel components may have
        # different time steps, but then self.P_snow
        # will be a time-interpolated value.
        #------------------------------------------------
        dh1_swe  = (self.P_snow * self.dt)
        self.h_swe  += dh1_swe

        #------------------------------------------------
        # Decrease snow water equivalent due to melting
        # Note that SM depends partly on h_snow.
        #------------------------------------------------
        dh2_swe    = self.SM * self.dt
        self.h_swe -= dh2_swe
        np.maximum(self.h_swe, np.float64(0), self.h_swe)  # (in place)
Example #12
def fuzzy_c_means(points, num_centers, m=2., tol=1e-4, max_iter=100,
                  verbose=False):
  '''Uses Fuzzy C-Means to downsample `points`.
  m : aggregation parameter >1, larger implies smoother clusters
  Returns indices of downsampled points.
  '''
  num_points = points.shape[0]
  if num_centers >= num_points:
    return np.arange(num_points)
  # randomly initialize cluster assignments matrix
  assn = np.random.random((points.shape[0], num_centers))
  # iterate assignments until they converge
  for i in range(max_iter):
    # compute centers
    w = assn ** m
    w /= w.sum(axis=0)
    centers = w.T.dot(points)
    # calculate new assignments
    d = pairwise_distances(points, centers)
    d **= 2. / (m - 1)
    np.maximum(d, 1e-10, out=d)
    new_assn = 1. / np.einsum('ik,ij->ik', d, 1./d)
    # check for convergence
    change = np.linalg.norm(new_assn - assn)
    if verbose:
      print('At iteration %d: change = %g' % (i+1, change))
    if change < tol:
      break
    assn = new_assn
  else:
    warnings.warn("fuzzy_c_means didn't converge in %d iterations" % max_iter)
  # find points closest to the selected cluster centers
  return d.argmin(axis=0)
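A minimal usage sketch for the downsampler above, on hypothetical random points; it assumes the same module-level imports as the source (numpy as np, warnings, and sklearn's pairwise_distances):

pts = np.random.random((200, 2))   # hypothetical data
idx = fuzzy_c_means(pts, num_centers=5)
print(idx)       # 5 indices into pts, one per cluster center
print(pts[idx])  # the selected representative points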
Example #13
    def _init_energy(self, pc):
        if pc is self._pc:
            return
        self.set_transform(self._t, pc)
        self._pc = pc
        self._res[:] = self.data[:, self._t] - self.mu[:]
        self._V = np.maximum(self.offset + np.mean(self._res ** 2), SMALL)
        self._res0[:] = self.data[:, self._t] - self.mu0
        self._V0 = np.maximum(self.offset0 + np.mean(self._res0 ** 2), SMALL)

        if self.use_derivatives:
            # linearize the data wrt the transform parameters
            # use the auxiliary array to save the current resampled data
            self._aux[:] = self.data[:, self._t]
            basis = np.eye(6)
            for j in range(pc.size):
                self.set_transform(self._t, pc + self.stepsize * basis[j])
                self.A[:, j] = (self.data[:, self._t] - self._aux)\
                    / self.stepsize
            self.transforms[self._t].param = pc
            self.data[:, self._t] = self._aux[:]
            # pre-compute gradient and hessian of numerator and
            # denominator
            c = 2 / float(self.data.shape[0])
            self._dV = c * np.dot(self.A.T, self._res)
            self._dV0 = c * np.dot(self.A.T, self._res0)
            self._H = c * np.dot(self.A.T, self.A)
Example #14
def psfplots():
	tpsf = wise.get_psf_model(1, pixpsf=True)
	
	psfp = tpsf.getPointSourcePatch(0, 0)
	psf = psfp.patch
	
	psf /= psf.sum()
	
	plt.clf()
	plt.imshow(np.log10(np.maximum(1e-5, psf)), interpolation='nearest', origin='lower')
	plt.colorbar()
	ps.savefig()
	
	h,w = psf.shape
	cx,cy = w/2, h/2
	
	X,Y = np.meshgrid(np.arange(w), np.arange(h))
	R = np.sqrt((X - cx)**2 + (Y - cy)**2)
	plt.clf()
	plt.semilogy(R.ravel(), psf.ravel(), 'b.')
	plt.xlabel('Radius (pixels)')
	plt.ylabel('PSF value')
	plt.ylim(1e-8, 1.)
	ps.savefig()
	
	plt.clf()
	plt.loglog(R.ravel(), psf.ravel(), 'b.')
	plt.xlabel('Radius (pixels)')
	plt.ylabel('PSF value')
	plt.ylim(1e-8, 1.)
	ps.savefig()
	
	print('PSF norm:', np.sqrt(np.sum(np.maximum(0, psf)**2)))
	print('PSF max:', psf.max())
Example #15
def shifted_corr(reference, image, displacement):
    """Calculate the correlation between the reference and the image shifted
    by the given displacement.

    Parameters
    ----------
    reference : np.ndarray
    image : np.ndarray
    displacement : np.ndarray

    Returns
    -------
    correlation : float

    """

    ref_cuts = np.maximum(0, displacement)
    ref = reference[ref_cuts[0]:, ref_cuts[1]:, ref_cuts[2]:]
    im_cuts = np.maximum(0, -displacement)
    im = image[im_cuts[0]:, im_cuts[1]:, im_cuts[2]:]
    s = np.minimum(im.shape, ref.shape)
    ref = ref[:s[0], :s[1], :s[2]]
    im = im[:s[0], :s[1], :s[2]]
    ref -= nanmean(ref.reshape(-1, ref.shape[-1]), axis=0)
    ref = np.nan_to_num(ref)
    im -= nanmean(im.reshape(-1, im.shape[-1]), axis=0)
    im = np.nan_to_num(im)
    assert np.all(np.isfinite(ref)) and np.all(np.isfinite(im))
    corr = nanmean(
        [old_div(np.sum(i * r), np.sqrt(np.sum(i * i) * np.sum(r * r))) for
         i, r in zip(np.rollaxis(im, -1), np.rollaxis(ref, -1))])
    return corr
Example #16
def make_strictly_feasible(x, lb, ub, rstep=1e-10):
    """Shift a point to the interior of a feasible region.
    
    Each element of the returned vector is at least at a relative distance
    `rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used.
    """
    x_new = x.copy()

    active = find_active_constraints(x, lb, ub, rstep)
    lower_mask = np.equal(active, -1)
    upper_mask = np.equal(active, 1)

    if rstep == 0:
        x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask])
        x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask])
    else:
        x_new[lower_mask] = (lb[lower_mask] +
                             rstep * np.maximum(1, np.abs(lb[lower_mask])))
        x_new[upper_mask] = (ub[upper_mask] -
                             rstep * np.maximum(1, np.abs(ub[upper_mask])))

    tight_bounds = (x_new < lb) | (x_new > ub)
    x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds])

    return x_new
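These two helpers match functions in SciPy's least-squares internals (scipy.optimize._lsq.common); assuming that is the source, a short demonstration of the nudging behavior (a private API, so subject to change):

import numpy as np
from scipy.optimize._lsq.common import make_strictly_feasible  # assumption: SciPy internal module

x = np.array([0.0, 1.0, 5.0])
lb = np.array([0.0, -1.0, 0.0])
ub = np.array([4.0, 2.0, 5.0])
# x[0] sits on its lower bound and x[2] on its upper bound; both are nudged strictly inside.
print(make_strictly_feasible(x, lb, ub, rstep=1e-10))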
Example #17
def test_maximum_minimum_scalar():
    data1 = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp1 = np.random.rand(3,4)
    data_tmp1[:] = 2
 
    arr_data1 = mx.nd.array(data_tmp1)
    arr_grad1 = mx.nd.empty(shape)

    test =  mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
    exe_test = test.bind(mx.cpu(), args=[arr_data1], args_grad=[arr_grad1])
    exe_test.forward()
    out = exe_test.outputs[0].asnumpy()
    npout =  np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
    assert reldiff(out, npout) < 1e-6

    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    exe_test.backward(out_grad)
    
    npout_grad = np.ones(shape)
    npout_grad[:] = 2
    mask1 = (data_tmp1 > 3).astype('float')
    mask2 = (9 > data_tmp1).astype('float')
    mask3 = (5 < data_tmp1).astype('float')
    mask4 = (data_tmp1 < 4).astype('float')
    npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
    
    assert reldiff(arr_grad1.asnumpy(), npout_grad1) < 1e-6
Example #18
 def update_core_cpu(self, param):
     grad = param.grad
     if grad is None:
         return
     hp = self.hyperparam
     eps = grad.dtype.type(hp.eps)
     if hp.eps != 0 and eps == 0:
         raise ValueError(
             'eps of Adam optimizer is too small for {} ({})'.format(
                 grad.dtype.name, hp.eps))
     m, v = self.state['m'], self.state['v']
     if (isinstance(m, intel64.mdarray)
             and isinstance(v, intel64.mdarray)):
         m.inplace_axpby(1.0, 1.0 - hp.beta1, grad - m)
         v.inplace_axpby(1.0, 1.0 - hp.beta2, grad*grad - v)
         if hp.amsgrad:
             vhat = self.state['vhat']
             numpy.maximum(vhat, v, out=vhat)
         else:
             vhat = v
         param.data.inplace_axpby(
             1.0 - hp.weight_decay_rate, -hp.eta,
             self.alpha_t * m / (numpy.sqrt(vhat) + hp.eps))
     else:
         m += (1 - hp.beta1) * (grad - m)
         v += (1 - hp.beta2) * (grad * grad - v)
         if hp.amsgrad:
             vhat = self.state['vhat']
             numpy.maximum(vhat, v, out=vhat)
         else:
             vhat = v
         param.data -= hp.eta * (
             self.alpha_t * m / (numpy.sqrt(vhat) + hp.eps) +
             hp.weight_decay_rate * param.data)
Example #19
def ensure_within_bounds(box, size):
    box['xmin'] = np.minimum(np.maximum(box['xmin'], 0), size['width']-1)
    box['xmax'] = np.minimum(np.maximum(box['xmax'], 0), size['width']-1)
    box['ymin'] = np.minimum(np.maximum(box['ymin'], 0), size['height']-1)
    box['ymax'] = np.minimum(np.maximum(box['ymax'], 0), size['height']-1)

    return box
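A minimal sketch with hypothetical values, clamping a box that spills outside a 640x480 frame (numpy assumed imported as np):

box = {'xmin': -5, 'xmax': 700, 'ymin': 10, 'ymax': 500}
size = {'width': 640, 'height': 480}
print(ensure_within_bounds(box, size))  # xmin -> 0, xmax -> 639, ymin -> 10, ymax -> 479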
Example #20
def clip_lower(arr,lower_bound):
    """
    In-place, one-sided version of numpy.clip().

    i.e. numpy.clip(arr,a_min=lower_bound,out=arr) if it existed.
    """
    maximum(arr,lower_bound,arr)
Example #21
def MakeQCFlag(ds,SeriesList):
    flag = []
    if len(SeriesList)<=0:
        #log.info('  MakeQCFlag: no series list specified')
        pass
    if len(SeriesList)==1:
        if SeriesList[0] in ds.series.keys():
            flag = ds.series[SeriesList[0]]['Flag'].copy()
        else:
            log.error('  MakeQCFlag: series '+str(SeriesList[0])+' not in ds.series')
    if len(SeriesList)>1:
        for ThisOne in SeriesList:
            if ThisOne in ds.series.keys():
                if len(flag)==0:
                    #flag = numpy.ones(numpy.size(ds.series[ThisOne]['Flag']))
                    flag = ds.series[ThisOne]['Flag'].copy()
                else:
                    tmp_flag = ds.series[ThisOne]['Flag'].copy()      # get a temporary copy of the flag
                    goodindex = numpy.where((numpy.mod(tmp_flag,10)==0) & (numpy.mod(flag,10)==0))    # find the elements with flag = 0, 10, 20 etc
                    badindex1 = numpy.where((numpy.mod(tmp_flag,10)==0) & (numpy.mod(flag,10)!=0))
                    badindex2 = numpy.where((numpy.mod(tmp_flag,10)!=0) & (numpy.mod(flag,10)==0))
                    badindex3 = numpy.where((numpy.mod(tmp_flag,10)!=0) & (numpy.mod(flag,10)!=0))
                    tmp_flag1 = numpy.zeros(len(tmp_flag),dtype=numpy.int32) + tmp_flag
                    flag2 = numpy.zeros(len(flag),dtype=numpy.int32) + flag
                    tmp_flag[badindex1] = 0                               # set them all to 0
                    flag2[badindex2] = 0                               # set them all to 0
                    flag[badindex1] = numpy.maximum(tmp_flag[badindex1],flag[badindex1])               # now take the maximum
                    flag[badindex2] = numpy.maximum(tmp_flag1[badindex2],flag2[badindex2])
                    flag[badindex3] = numpy.maximum(tmp_flag1[badindex3],flag[badindex3])
                    flag[goodindex] = numpy.maximum(tmp_flag1[goodindex],flag[goodindex])
            else:
                log.error('  MakeQCFlag: series '+ThisOne+' not in ds.series')
    return flag
Example #22
def rp_gumbel_original(p_zero, loc, scale, flvol, max_return_period=1e9):
    """
    Transforms a unique, or array of flood volumes into the belonging return
    periods, according to gumbel parameters (belonging to non-zero part of the
    distribution) and a zero probability
    Inputs:
        p_zero:        probability that flood volume is zero
        loc:           Gumbel location parameter (of non-zero part of distribution)
        scale:         Gumbel scale parameter (of non-zero part of distribution)
        flvol:         Flood volume that will be transformed to return period
        max_return_period: maximum return period considered. This maximum is needed to prevent that floating point
                        precision becomes a problem (default: 1e9)
    This function is copied from: https://repos.deltares.nl/repos/Hydrology/trunk/GLOFRIS/src/rp_bias_corr.py
    """
    
    np.seterr(divide='ignore')
    np.seterr(invalid='ignore')
    max_p = 1-1./max_return_period
    max_p_residual = np.minimum(np.maximum((max_p-np.float64(p_zero))/(1-np.float64(p_zero)), 0), 1)
    max_reduced_variate = -np.log(-np.log(np.float64(max_p_residual)))
    # compute the gumbel reduced variate belonging to the Gumbel distribution (excluding any zero-values)
    # make sure that the reduced variate does not exceed the one, resembling the 1,000,000 year return period
    reduced_variate = np.minimum((flvol-loc)/scale, max_reduced_variate)
    # reduced_variate = (flvol-loc)/scale
    # transform the reduced variate into a probability (residual after removing the zero volume probability)
    p_residual = np.minimum(np.maximum(np.exp(-np.exp(-np.float64(reduced_variate))), 0), 1)
    # tranform from non-zero only distribution to zero-included distribution
    p = np.minimum(np.maximum(p_residual*(1-p_zero) + p_zero, p_zero), max_p)  # Never larger than max_p
    # transform into a return period    
    return_period = 1./(1-p)
    test_p = p == 1    
    return return_period, test_p
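A usage sketch with hypothetical Gumbel parameters: 20% of years see zero flood volume, and the non-zero part has loc=100, scale=50 (numpy assumed imported as np):

rp, capped = rp_gumbel_original(p_zero=0.2, loc=100., scale=50.,
                                flvol=np.array([100., 250., 500.]))
print(rp)      # return periods grow sharply with volume: roughly 2, 26, and 3.7e3 years
print(capped)  # flags any entry whose probability still rounded to exactly 1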
Example #23
 def _logpmf(self, x, mu, alpha, p):
     mu_p = mu ** (p - 1.)
     a1 = np.maximum(np.nextafter(0, 1), 1 + alpha * mu_p)
     a2 = np.maximum(np.nextafter(0, 1), mu + (a1 - 1.) * x)
     logpmf_ = np.log(mu) + (x - 1.) * np.log(a2)
     logpmf_ -=  x * np.log(a1) + gammaln(x + 1.) + a2 / a1
     return logpmf_
Example #24
    def pdf(self, t):
        """
        Implementing the code distributed with Logan et al. 2013 using
        the reparametrization given above.

        Also, the constant theta is added as usual.
        """
        t=np.maximum(t-self.theta, 1e-5) # absorbed into pdf
        sqrt_t=np.sqrt(t)

        # reparametrization
        a=self.A/2.0
        k=self.alpha-self.A/2.0
        l=self.gamma

        if self.A<1e-10: # this is the solution without starting-point variability
            r=self.alpha/(np.sqrt(2*np.pi*(t**3)))*np.exp(- ((self.alpha-self.gamma*t)**2)/(2*t))
        elif self.gamma<1e-10:
            r=np.exp( -.5*( np.log(2)+np.log(np.pi)+np.log(t))
                      + np.log( np.exp(-( (k-a)**2/(2*t)))-np.exp(-( (k+a)**2/(2*t) )) )
                      - np.log(2) - np.log(a) )
        else:
            r=np.exp( np.log( (np.exp(- (a-k+t*l)**2/(2*t) )-np.exp(- (a+k-t*l)**2/(2*t) ))/np.sqrt(2*np.pi*t)
                              + np.exp(np.log(.5)+np.log(l))*( 2*pnormP( (-k+a)/sqrt_t + sqrt_t*l)-1
                                                               + 2*pnormP( (k+a)/sqrt_t - sqrt_t*l)-1) )
                      - np.log(2) - np.log(a))

        return np.maximum(0.0, np.where( np.isnan(r), 0, r))
Example #25
    def cdf(self,t):
        t=np.maximum(t-self.theta, 1e-5) # absorbed into cdf

        sqrt_t=np.sqrt(t)

        # reparametrization
        a=self.A/2.0
        k=self.alpha-self.A/2.0
        l=self.gamma

        if self.A<1e-10: # this is the solution without starting-point variability
            r=pnormP( (self.gamma*t-self.alpha)/sqrt_t)+np.exp(2*self.alpha*self.gamma)*pnormP(-(self.gamma*t+self.alpha)/(sqrt_t))
        elif self.gamma<1e-10:
            r=(( -(k+a)*(2*pnormP( (k+a)/sqrt_t)-1)
                 -(k-a)*(2*pnormP(-(k-a)/sqrt_t)-1))/(2*a)) \
              + (1 + np.exp(-.5*(k-a)**2/t - .5*np.log(2) - .5*np.log(np.pi) + .5*np.log(t) - np.log(a))
                 -   np.exp(-.5*(k+a)**2/t - .5*np.log(2) - .5*np.log(np.pi) + .5*np.log(t) - np.log(a)))
        else:
            t1=np.exp( .5*np.log(t)-.5*np.log(2*np.pi) ) * (  np.exp( -((k-a-t*l)**2/t)/2.)
                                                            - np.exp( -((k+a-t*l)**2/t)/2.) ) # ok
            t2=a+(   np.exp(2*l*(k+a)+np.log(pnormP(-(k+a+t*l)/sqrt_t)))
                   - np.exp(2*l*(k-a)+np.log(pnormP(-(k-a+t*l)/sqrt_t))) )/(2*l) # ok
            t4= (.5*(t*l-a-k+.5/l)) * ( 2*pnormP((k+a)/sqrt_t-sqrt_t*l)-1) \
               + .5*(k-a-t*l-.5/l)*( 2*pnormP((k-a)/sqrt_t-sqrt_t*l)-1)
            r=(t4+t2+t1)/(2*a)


        return np.minimum( np.maximum( 0., np.where( np.isnan(r), 0, r) ), 1.)
Example #26
    def test_elementwise_max_grad(self, n, m, d, gc, dc):
        go = np.random.rand(n, m, d).astype(np.float32)
        X = np.random.rand(n, m, d).astype(np.float32)
        Y = np.random.rand(n, m, d).astype(np.float32)
        Z = np.random.rand(n, m, d).astype(np.float32)
        mx = np.maximum(np.maximum(X, Y), Z)
        inputs = [mx, go, X, Y, Z]

        def max_grad_op(mx, go, X, Y, Z):
            def mx_grad(a):
                return go * (mx == a)

            return [mx_grad(a) for a in [X, Y, Z]]

        op = core.CreateOperator(
            "MaxGradient",
            ["mx", "go", "X", "Y", "Z"],
            ["gX", "gY", "gZ"]
        )

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            reference=max_grad_op,
        )
        self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])
Example #27
def nms(boxes, threshold, method):
    if boxes.size == 0:
        return np.empty((0, 3))
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    s = boxes[:, 4]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    I = np.argsort(s)
    pick = np.zeros_like(s, dtype=np.int16)
    counter = 0
    while I.size > 0:
        i = I[-1]
        pick[counter] = i
        counter += 1
        idx = I[0:-1]
        xx1 = np.maximum(x1[i], x1[idx])
        yy1 = np.maximum(y1[i], y1[idx])
        xx2 = np.minimum(x2[i], x2[idx])
        yy2 = np.minimum(y2[i], y2[idx])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        if method == 'Min':
            o = inter / np.minimum(area[i], area[idx])
        else:
            o = inter / (area[i] + area[idx] - inter)
        I = I[np.where(o <= threshold)]
    pick = pick[0:counter]
    return pick
Example #28
def nms2d(boxes, overlap=0.3):
    """Compute the nms given a set of scored boxes,
    as numpy array with 5 columns <x1> <y1> <x2> <y2> <score>
    return the indices of the tubelets to keep
    """

    if boxes.size == 0:
        return np.array([],dtype=np.int32)

    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]

    scores = boxes[:, 4]
    areas = (x2-x1+1) * (y2-y1+1)
    I = np.argsort(scores)
    indices = np.zeros(scores.shape, dtype=np.int32)

    counter = 0
    while I.size > 0:
        i = I[-1]
        indices[counter] = i
        counter += 1

        xx1 = np.maximum(x1[i],x1[I[:-1]])
        yy1 = np.maximum(y1[i],y1[I[:-1]])
        xx2 = np.minimum(x2[i],x2[I[:-1]])
        yy2 = np.minimum(y2[i],y2[I[:-1]])

        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        iou = inter / (areas[i] + areas[I[:-1]] - inter)
        I = I[np.where(iou <= overlap)[0]]

    return indices[:counter]
Example #29
def prox_l1(Y, alpha, n_orient):
    """proximity operator for l1 norm with multiple orientation support

    L2 over orientation and L1 over position (space + time)

    Example
    -------
    >>> Y = np.tile(np.array([1, 2, 3, 2, 0], dtype=float), (2, 1))
    >>> Y = np.r_[Y, np.zeros_like(Y)]
    >>> print(Y)
    [[ 1.  2.  3.  2.  0.]
     [ 1.  2.  3.  2.  0.]
     [ 0.  0.  0.  0.  0.]
     [ 0.  0.  0.  0.  0.]]
    >>> Yp, active_set = prox_l1(Y, 2, 2)
    >>> print(Yp)
    [[ 0.          0.58578644  1.58578644  0.58578644  0.        ]
     [ 0.          0.58578644  1.58578644  0.58578644  0.        ]]
    >>> print(active_set)
    [ True  True False False]
    """
    n_positions = Y.shape[0] // n_orient
    norms = np.sqrt(np.sum((np.abs(Y) ** 2).T.reshape(-1, n_orient), axis=1))
    # Ensure shrink is >= 0 while avoiding any division by zero
    shrink = np.maximum(1.0 - alpha / np.maximum(norms, alpha), 0.0)
    shrink = shrink.reshape(-1, n_positions).T
    active_set = np.any(shrink > 0.0, axis=1)
    shrink = shrink[active_set]
    if n_orient > 1:
        active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
    Y = Y[active_set]
    if len(Y) > 0:
        for o in range(n_orient):
            Y[o::n_orient] *= shrink
    return Y, active_set
Example #30
def _beta_divergence_dense(X, W, H, beta):
    """Compute the beta-divergence of X and W.H for dense array only.

    Used as a reference for testing nmf._beta_divergence.
    """
    if isinstance(X, numbers.Number):
        W = np.array([[W]])
        H = np.array([[H]])
        X = np.array([[X]])

    WH = np.dot(W, H)

    if beta == 2:
        return squared_norm(X - WH) / 2

    WH_Xnonzero = WH[X != 0]
    X_nonzero = X[X != 0]
    np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero)

    if beta == 1:
        res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero))
        res += WH.sum() - X.sum()

    elif beta == 0:
        div = X_nonzero / WH_Xnonzero
        res = np.sum(div) - X.size - np.sum(np.log(div))
    else:
        res = (X_nonzero ** beta).sum()
        res += (beta - 1) * (WH ** beta).sum()
        res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum()
        res /= beta * (beta - 1)

    return res
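A quick self-check with hypothetical matrices: for beta=2 the divergence reduces to half the squared Frobenius norm. This assumes the source's own imports (numbers, and squared_norm as in sklearn.utils.extmath):

X = np.abs(np.random.rand(4, 3))
W = np.abs(np.random.rand(4, 2))
H = np.abs(np.random.rand(2, 3))
d = _beta_divergence_dense(X, W, H, beta=2)
assert np.isclose(d, 0.5 * np.linalg.norm(X - W.dot(H)) ** 2)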
Example #31
def quantized_np(array, scale, data_width=8):
    quantized_array = np.round(array / scale)
    quantized_array = np.maximum(quantized_array, -2**(data_width - 1))
    quantized_array = np.minimum(quantized_array, 2**(data_width - 1) - 1)
    return quantized_array
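A minimal sketch of int8-style quantization with a hypothetical scale; the maximum/minimum pair acts as an np.clip to [-128, 127] (numpy assumed imported as np):

x = np.array([-100.0, 0.3, 63.7, 1000.0])
print(quantized_np(x, scale=0.5))  # -> [-128.    1.  127.  127.]
assert np.array_equal(quantized_np(x, 0.5), np.clip(np.round(x / 0.5), -128, 127))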
Example #32
def mel(sr,
        n_fft,
        n_mels=128,
        fmin=0.0,
        fmax=None,
        htk=False,
        norm='slaney',
        dtype=np.float32):
    """Create a Filterbank matrix to combine FFT bins into Mel-frequency bins

    Parameters
    ----------
    sr        : number > 0 [scalar]
        sampling rate of the incoming signal

    n_fft     : int > 0 [scalar]
        number of FFT components

    n_mels    : int > 0 [scalar]
        number of Mel bands to generate

    fmin      : float >= 0 [scalar]
        lowest frequency (in Hz)

    fmax      : float >= 0 [scalar]
        highest frequency (in Hz).
        If `None`, use `fmax = sr / 2.0`

    htk       : bool [scalar]
        use HTK formula instead of Slaney

    norm : {None, 1, 'slaney', np.inf} [scalar]
        If 1 or 'slaney', divide the triangular mel weights by the width of the mel band
        (area normalization).
        
        .. warning:: `norm=1` and `norm=np.inf` behavior will change in version 0.8.0.

        Otherwise, leave all the triangles aiming for a peak value of 1.0

    dtype : np.dtype
        The data type of the output basis.
        By default, uses 32-bit (single-precision) floating point.

    Returns
    -------
    M         : np.ndarray [shape=(n_mels, 1 + n_fft/2)]
        Mel transform matrix

    Notes
    -----
    This function caches at level 10.

    Examples
    --------
    >>> melfb = librosa.filters.mel(22050, 2048)
    >>> melfb
    array([[ 0.   ,  0.016, ...,  0.   ,  0.   ],
           [ 0.   ,  0.   , ...,  0.   ,  0.   ],
           ...,
           [ 0.   ,  0.   , ...,  0.   ,  0.   ],
           [ 0.   ,  0.   , ...,  0.   ,  0.   ]])


    Clip the maximum frequency to 8KHz

    >>> librosa.filters.mel(22050, 2048, fmax=8000)
    array([[ 0.  ,  0.02, ...,  0.  ,  0.  ],
           [ 0.  ,  0.  , ...,  0.  ,  0.  ],
           ...,
           [ 0.  ,  0.  , ...,  0.  ,  0.  ],
           [ 0.  ,  0.  , ...,  0.  ,  0.  ]])


    >>> import matplotlib.pyplot as plt
    >>> plt.figure()
    >>> librosa.display.specshow(melfb, x_axis='linear')
    >>> plt.ylabel('Mel filter')
    >>> plt.title('Mel filter bank')
    >>> plt.colorbar()
    >>> plt.tight_layout()
    >>> plt.show()
    """

    if fmax is None:
        fmax = float(sr) / 2

    if norm == 1:
        warnings.warn(
            'norm=1 behavior will change in librosa 0.8.0. '
            "To maintain forward compatibility, use norm='slaney' instead.",
            FutureWarning)
    elif norm == np.inf:
        warnings.warn(
            'norm=np.inf behavior will change in librosa 0.8.0. '
            "To maintain forward compatibility, use norm=None instead.",
            FutureWarning)

    elif norm not in (None, 1, 'slaney', np.inf):
        raise ParameterError(
            "Unsupported norm={}, must be one of: None, 1, 'slaney', np.inf".
            format(repr(norm)))

    # Initialize the weights
    n_mels = int(n_mels)
    weights = np.zeros((n_mels, int(1 + n_fft // 2)), dtype=dtype)

    # Center freqs of each FFT bin
    fftfreqs = fft_frequencies(sr=sr, n_fft=n_fft)

    # 'Center freqs' of mel bands - uniformly spaced between limits
    mel_f = mel_frequencies(n_mels + 2, fmin=fmin, fmax=fmax, htk=htk)

    fdiff = np.diff(mel_f)
    ramps = np.subtract.outer(mel_f, fftfreqs)

    for i in range(n_mels):
        # lower and upper slopes for all bins
        lower = -ramps[i] / fdiff[i]
        upper = ramps[i + 2] / fdiff[i + 1]

        # .. then intersect them with each other and zero
        weights[i] = np.maximum(0, np.minimum(lower, upper))

    if norm in (1, 'slaney'):
        # Slaney-style mel is scaled to be approx constant energy per channel
        enorm = 2.0 / (mel_f[2:n_mels + 2] - mel_f[:n_mels])
        weights *= enorm[:, np.newaxis]

    # Only check weights if f_mel[0] is positive
    if not np.all((mel_f[:-2] == 0) | (weights.max(axis=1) > 0)):
        # This means we have an empty channel somewhere
        warnings.warn('Empty filters detected in mel frequency basis. '
                      'Some channels will produce empty responses. '
                      'Try increasing your sampling rate (and fmax) or '
                      'reducing n_mels.')

    return weights
Example #33
def apply_bounds(x, varmin, varmax):
    x = np.maximum(x, varmin)
    x = np.minimum(x, varmax)
    return x
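This pair of calls is exactly np.clip; a one-line check with hypothetical values (numpy assumed imported as np):

x = np.array([-3.0, 0.5, 7.0])
assert np.array_equal(apply_bounds(x, varmin=0.0, varmax=5.0),
                      np.clip(x, 0.0, 5.0))  # both give [0.0, 0.5, 5.0]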
Example #34
def fuzzyfication(M):
    L = np.maximum(M-2, np.full((M.shape), 1))
    U = np.minimum(M+2, np.full((M.shape), 9))
    return np.dstack((L, M, U))
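A usage sketch: building triangular fuzzy numbers (L, M, U) around crisp scores on a 1-to-9 scale, with a hypothetical input matrix (numpy assumed imported as np):

M = np.array([[1, 5, 9]])
print(fuzzyfication(M))
# [[[1 1 3]     each (L, M, U) triple is M-2 and M+2,
#   [3 5 7]     clipped to the 1..9 scale
#   [7 9 9]]]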
Example #35
white_content = mat_transforms.whitening(z_content.cpu().detach().numpy())  # (C, HW)
color_content = mat_transforms.colouring(z_style.cpu().detach().numpy(), white_content)  # (C, HW)

# alpha = 0.6
# color_content = alpha*color_content + (1.-alpha)*z_content.cpu().detach().numpy()

color_content = torch.Tensor(color_content) # tvt.ToTensor()(color_content)
color_content = color_content.view([n_channels, n_1, n_2]) # (C, H, W)
color_content = color_content.unsqueeze(0) # (1, C, H, W)

inputs_hat = decoder(color_content.to(device), maxpool_content)

new_audio = inputs_hat.squeeze(0) # (C, H, W)
new_audio = reverse_normalize(new_audio) # (C, H, W)
new_audio = new_audio[0] # take only 1 channel
# new_audio = torch.transpose(new_audio, 0, 1) # (H, C, W)
# new_audio = torch.transpose(new_audio, 1, 2) # (H, W, C)

new_audio = np.maximum(np.minimum(new_audio.cpu().detach().numpy(), 0.0), -80.0)
power_spectro = librosa.core.db_to_power(new_audio, ref=1.0)

inv_power = librosa.feature.inverse.mel_to_stft(power_spectro, sr=16000, n_fft=400, power=2.0)

full_stft = inv_power*np.exp(1j*content_ang)
y_new = librosa.core.istft(full_stft, hop_length=160, win_length=400)
# y_new = librosa.feature.inverse.mel_to_audio(power_spectro, sr=16000,
#                                              n_fft=400, hop_length=160,
#                                              win_length=400, n_iter=32) # it will take sqrt

librosa.output.write_wav("processed.wav", y_new, 16000)
Example #36
                        img_center = img_size / 2
                        offsets = np.vstack(
                            [(det[:, 0] + det[:, 2]) / 2 - img_center[1], (det[:, 1] + det[:, 3]) / 2 - img_center[0]])
                        offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
                        index = np.argmax(
                            bounding_box_size - offset_dist_squared * 2.0)  # some extra weight on the centering
                        det_arr.append(det[index, :])
                else:
                    det_arr.append(np.squeeze(det))

                images = np.zeros((len(det_arr), image_size, image_size, 3))
                recimg = img.copy()
                for i, det in enumerate(det_arr):
                    det = np.squeeze(det)
                    bb = np.zeros(4, dtype=np.int32)
                    bb[0] = np.maximum(det[0] - margin / 2, 0)
                    bb[1] = np.maximum(det[1] - margin / 2, 0)
                    bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
                    bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
                    cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                    cv2.imshow("face{0}".format(i),cropped)
                    cv2.rectangle(recimg,(bb[0],bb[1]),(bb[2],bb[3]),(255,0,0),2) 
                    scaled = cv2.resize(cropped, (image_size, image_size))
                    images[i] = scaled
                cv2.imshow("rectangle",recimg)
            
            if nrof_faces > 0:
                images = images
            else:
                    # If no face was detected, just return a 1x3 zero matrix; any shape works, as long as it can be told apart from a real image crop
                images = np.zeros((1, 3))
Example #37
    def work(self):
        global GLOBAL_EP, GLOBAL_COUNTER
        t = 0
        while not COORD.should_stop():
            s = self.env.reset()
            ep_r = 0
            buffer_s, buffer_a, buffer_r, buffer_v ,buffer_done = [], [], [], [], []
            done = False
            
            while not done:
                if not COLLECT_EVENT.is_set():                  
                    COLLECT_EVENT.wait()                        
                    buffer_s, buffer_a, buffer_r, buffer_v ,buffer_done = [], [], [], [], []
                a,v = self.ppo.choose_action(s)
                s_, r, done, _ = self.env.step(a)
                buffer_s.append(s)
                buffer_a.append(a)
                buffer_r.append(r)
                buffer_v.append(v)
                buffer_done.append(done)
                s = s_
                ep_r += r
                t+=1
                GLOBAL_COUNTER += 1
                # update ppo
                if (done or GLOBAL_COUNTER >= BATCH):
                    
                    t = 0
                    rewards = np.array(buffer_r)
                    v_final = [v * (1 - done)] 
                    terminals = np.array(buffer_done + [done])
                    values = np.array(buffer_v + v_final)
                    delta = rewards + GAMMA * values[1:] * (1 - terminals[1:]) - values[:-1]
                    advantage = discount(delta, GAMMA * LAMBDA, terminals)
                    returns = advantage + np.array(buffer_v)
                    advantage = (advantage - advantage.mean()) / np.maximum(advantage.std(), 1e-6)


                    bs, ba, br,badv = np.reshape(buffer_s, (-1,) + self.ppo.s_dim), np.vstack(buffer_a), \
                                    np.vstack(returns), np.vstack(advantage)
                    buffer_s, buffer_a, buffer_r = [], [], []
                    buffer_v, buffer_done = [], []
                    COLLECT_EVENT.wait()
                    self.lock.acquire()
                    for i in range(len(bs)):
                        GLOBAL_DATA["state"].append(bs[i])
                        GLOBAL_DATA["reward"].append(br[i])
                        GLOBAL_DATA["action"].append(ba[i])
                        GLOBAL_DATA["advantage"].append(badv[i])
                    self.lock.release()
                    if GLOBAL_COUNTER >= BATCH and len(GLOBAL_DATA["state"])>= BATCH:
                        COLLECT_EVENT.clear()
                        UPDATE_EVENT.set() 
                    # self.ppo.update(bs, ba, br,badv)

                if GLOBAL_EP >= EP_MAX:
                    self.env.close()
                    COORD.request_stop()
                    break
            print("episode = {}, ep_r = {}, wid = {}".format(GLOBAL_EP,ep_r,self.wid))
            GLOBAL_EP += 1
            if GLOBAL_EP != 0 and GLOBAL_EP % 500 == 0:
                self.ppo.save_model(steps=GLOBAL_EP)
Example #38
 def calc_nt_freq(self):
     total = np.maximum(1., self.x.sum(axis=2))
     self.nt_freq = ((1-self.e)*(np.divide(self.x, 1.*total[:,:,np.newaxis])).clip(1e-10) + self.e/4.).clip(1e-10)
     return self
Example #39
def convert(fname, crop_size, convert_fname):
    img = cv2.imread(fname)
    img_CW = cv2.imread(fname.replace('images_896x896', 'lesion_segs_896x896').replace('.jpg', '/cotton_wool_spots.png'))
    img_FP = cv2.imread(fname.replace('images_896x896', 'lesion_segs_896x896').replace('.jpg', '/fibrous_proliferation.png'))
    img_EX = cv2.imread(fname.replace('images_896x896', 'lesion_segs_896x896').replace('.jpg', '/hard_exudate.png'))
    img_MA = cv2.imread(fname.replace('images_896x896', 'lesion_segs_896x896').replace('.jpg', '/microaneurysm.png'))
    img_NS = cv2.imread(fname.replace('images_896x896', 'lesion_segs_896x896').replace('.jpg', '/neovascularization.png'))
    img_PH = cv2.imread(fname.replace('images_896x896', 'lesion_segs_896x896').replace('.jpg', '/preretinal_hemorrhage.png'))
    img_RH = cv2.imread(fname.replace('images_896x896', 'lesion_segs_896x896').replace('.jpg', '/retinal_hemorrhage.png'))
    img_VH = cv2.imread(fname.replace('images_896x896', 'lesion_segs_896x896').replace('.jpg', '/vitreous_hemorrhage.png'))

    img_CW = img_CW[..., 2] if img_CW is not None else img[..., 2] * 0
    img_FP = img_FP[..., 2] if img_FP is not None else img[..., 2] * 0
    img_EX = img_EX[..., 2] if img_EX is not None else img[..., 2] * 0
    img_MA = img_MA[..., 2] if img_MA is not None else img[..., 2] * 0
    img_NS = img_NS[..., 2] if img_NS is not None else img[..., 2] * 0
    img_PH = img_PH[..., 2] if img_PH is not None else img[..., 2] * 0
    img_RH = img_RH[..., 2] if img_RH is not None else img[..., 2] * 0
    img_VH = img_VH[..., 2] if img_VH is not None else img[..., 2] * 0

    ba = img
    h, w, _ = ba.shape

    if w > 1.2 * h:
        # to get the threshold, compute the maximum value of left and right 1/32-width part
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        left_max = gray[:, : w // 32].max().astype(int)
        right_max = gray[:, - w // 32:].max().astype(int)
        max_bg = np.maximum(left_max, right_max)

        # print(max_bg)  # TODO: DEBUG
        _, foreground = cv2.threshold(gray, max_bg + 20, 255, cv2.THRESH_BINARY)
        bbox = cv2.boundingRect(cv2.findNonZero(foreground))  # (x, y, width, height)

        if bbox is None:
            print('bbox none for {} (???)'.format(fname))
        else:
            left, upper, width, height = bbox

            # if we selected less than 80% of the original 
            # height, just crop the square
            if width < 0.8 * h or height < 0.8 * h:
                print('bbox too small for {}'.format(fname))
                bbox = None
    else:
        bbox = None

    if bbox is None:
        bbox = square_bbox(w, h)

    # do croping
    left, upper, width, height = bbox
    img = img[upper:upper+height, left:left+width, ...]
    img_CW = img_CW[upper:upper+height, left:left+width]
    img_FP = img_FP[upper:upper+height, left:left+width]
    img_EX = img_EX[upper:upper+height, left:left+width]
    img_MA = img_MA[upper:upper+height, left:left+width]
    img_NS = img_NS[upper:upper+height, left:left+width]
    img_PH = img_PH[upper:upper+height, left:left+width]
    img_RH = img_RH[upper:upper+height, left:left+width]
    img_VH = img_VH[upper:upper+height, left:left+width]

    #padding
    if width != height:
        if width > height:
            pad_width = width - height
            pad = ((pad_width//2, pad_width-pad_width//2), (0, 0))
        else:
            pad_width = height - width
            pad = ((0, 0), (pad_width // 2, pad_width - pad_width // 2))
        img = np.pad(img, (pad[0], pad[1], (0,0)), 'constant', constant_values=0)
        img_CW = np.pad(img_CW, pad, 'constant', constant_values=0)
        img_FP = np.pad(img_FP, pad, 'constant', constant_values=0)
        img_EX = np.pad(img_EX, pad, 'constant', constant_values=0)
        img_MA = np.pad(img_MA, pad, 'constant', constant_values=0)
        img_NS = np.pad(img_NS, pad, 'constant', constant_values=0)
        img_PH = np.pad(img_PH, pad, 'constant', constant_values=0)
        img_RH = np.pad(img_RH, pad, 'constant', constant_values=0)
        img_VH = np.pad(img_VH, pad, 'constant', constant_values=0)

    # resizing
    img = cv2.resize(img, (crop_size, crop_size), interpolation=cv2.INTER_CUBIC)
    img_CW = cv2.resize(img_CW, (crop_size, crop_size), interpolation=cv2.INTER_NEAREST)
    img_FP = cv2.resize(img_FP, (crop_size, crop_size), interpolation=cv2.INTER_NEAREST)
    img_EX = cv2.resize(img_EX, (crop_size, crop_size), interpolation=cv2.INTER_NEAREST)
    img_MA = cv2.resize(img_MA, (crop_size, crop_size), interpolation=cv2.INTER_NEAREST)
    img_NS = cv2.resize(img_NS, (crop_size, crop_size), interpolation=cv2.INTER_NEAREST)
    img_PH = cv2.resize(img_PH, (crop_size, crop_size), interpolation=cv2.INTER_NEAREST)
    img_RH = cv2.resize(img_RH, (crop_size, crop_size), interpolation=cv2.INTER_NEAREST)
    img_VH = cv2.resize(img_VH, (crop_size, crop_size), interpolation=cv2.INTER_NEAREST)


    cv2.imwrite(convert_fname, img)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 15, 255, cv2.THRESH_BINARY)
    cv2.imwrite(convert_fname.replace('.jpg', '_MASK.png'), binary)

    cv2.imwrite(convert_fname.replace('.jpg', '_CW.png'), img_CW)
    cv2.imwrite(convert_fname.replace('.jpg', '_FP.png'), img_FP)
    cv2.imwrite(convert_fname.replace('.jpg', '_EX.png'), img_EX)
    cv2.imwrite(convert_fname.replace('.jpg', '_MA.png'), img_MA)
    cv2.imwrite(convert_fname.replace('.jpg', '_NS.png'), img_NS)
    cv2.imwrite(convert_fname.replace('.jpg', '_PH.png'), img_PH)
    cv2.imwrite(convert_fname.replace('.jpg', '_RH.png'), img_RH)
    cv2.imwrite(convert_fname.replace('.jpg', '_VH.png'), img_VH)
Example #40
            and (not rerun):
        continue

    logging.debug('Aligning session vids: %s', str(session_vids))
    mc, duration, shifts_rig = mc_vids(session_vids, mc_rigid_template)
    fname_mc = mc.fname_tot_els if doPwRigid else mc.fname_tot_rig
    logging.debug('Created motion corrected files: %s', str(fname_mc))

    if mc_rigid_template is None:
        mc_rigid_template = mc.total_template_rig
        np.save(rigid_template_fpath, mc_rigid_template)
    plot_stats(s_fpath, mc, shifts_rig)

    if doPwRigid:
        max_shift = np.ceil(
            np.maximum(np.max(np.abs(mc.x_shifts_els)),
                       np.max(np.abs(mc.y_shifts_els)))).astype(int)
    else:
        max_shift = np.ceil(np.max(np.abs(mc.shifts_rig))).astype(int)

    end_time = time.time()
    mc_stats = dict()
    mc_stats['analysed_datetime'] = analysis_time
    mc_stats['mc_duration'] = duration
    mc_stats['max_shift'] = int(max_shift)
    mc_stats['PwRigid'] = doPwRigid
    vids_crispness, joint_vids_crispness = eval_mc_quality(session_vids)
    mc_stats['crispness_before'] = vids_crispness
    mc_stats['crispness_before_movie_pairs'] = joint_vids_crispness
    vids_crispness, joint_vids_crispness = eval_mc_quality(fname_mc)
    mc_stats['crispness_after'] = vids_crispness
    mc_stats['crispness_after_movie_pairs'] = joint_vids_crispness
Example #41
 def lagrange_prox(self, x, lipschitz=1, lagrange=None):
     lagrange = seminorm.lagrange_prox(self, x, lipschitz, lagrange)
     return np.sign(x) * np.maximum(
         np.fabs(x) - lagrange * self.weights / lipschitz, 0)
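The prox above is weighted soft-thresholding; a minimal standalone sketch of the unweighted case (a hypothetical helper, not part of the source class):

import numpy as np

def soft_threshold(x, t):
    # Shrink each entry toward zero by t, zeroing anything within [-t, t].
    return np.sign(x) * np.maximum(np.fabs(x) - t, 0)

print(soft_threshold(np.array([-3., -0.5, 0.2, 2.]), 1.0))  # [-2. -0.  0.  1.]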
Example #42
def evaluate(
    generator,
    retinanet,
    iou_threshold=0.5,
    score_threshold=0.05,
    max_detections=100,
    save_path=None
):
    """ Evaluate a given dataset using a given retinanet.
    # Arguments
        generator       : The generator that represents the dataset to evaluate.
        retinanet           : The retinanet to evaluate.
        iou_threshold   : The threshold used to consider when a detection is positive or negative.
        score_threshold : The score confidence threshold to use for detections.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save images with visualized detections to.
    # Returns
        A dict mapping class names to mAP scores.
    """



    # gather all detections and annotations

    all_detections     = _get_detections(generator, retinanet, score_threshold=score_threshold, max_detections=max_detections, save_path=save_path)
    all_annotations    = _get_annotations(generator)

    average_precisions = {}

    for label in range(generator.num_classes()):
        false_positives = np.zeros((0,))
        true_positives  = np.zeros((0,))
        scores          = np.zeros((0,))
        num_annotations = 0.0

        for i in range(len(generator)):
            detections           = all_detections[i][label]
            annotations          = all_annotations[i][label]
            num_annotations     += annotations.shape[0]
            detected_annotations = []

            for d in detections:
                scores = np.append(scores, d[4])

                if annotations.shape[0] == 0:
                    false_positives = np.append(false_positives, 1)
                    true_positives  = np.append(true_positives, 0)
                    continue

                overlaps            = compute_overlap(np.expand_dims(d, axis=0), annotations)
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap         = overlaps[0, assigned_annotation]

                if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                    false_positives = np.append(false_positives, 0)
                    true_positives  = np.append(true_positives, 1)
                    detected_annotations.append(assigned_annotation)
                else:
                    false_positives = np.append(false_positives, 1)
                    true_positives  = np.append(true_positives, 0)

        # no annotations -> AP for this class is 0 (is this correct?)
        if num_annotations == 0:
            average_precisions[label] = 0, 0
            continue

        # sort by score
        indices         = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives  = true_positives[indices]

        # compute false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives  = np.cumsum(true_positives)

        # compute recall and precision
        recall    = true_positives / num_annotations
        precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)

        # compute average precision
        average_precision  = _compute_ap(recall, precision)
        average_precisions[label] = average_precision, num_annotations
    
    print('\nAP:')
    for label in range(generator.num_classes()):
        label_name = generator.label_to_name(label)
        print('{}: {}'.format(label_name, average_precisions[label][0]))
    
    return average_precisions
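The precision/recall bookkeeping above follows the standard cumulative-sum pattern; a minimal sketch with toy detections already sorted by descending score (values are made up):

import numpy as np

true_positives = np.array([1., 1., 0., 1., 0.])   # toy per-detection labels
false_positives = 1.0 - true_positives
num_annotations = 4.0
tp_cum = np.cumsum(true_positives)
fp_cum = np.cumsum(false_positives)
recall = tp_cum / num_annotations
precision = tp_cum / np.maximum(tp_cum + fp_cum, np.finfo(np.float64).eps)  # eps guards against 0/0
print(recall)     # [0.25 0.5  0.5  0.75 0.75]
print(precision)  # [1. 1. 0.6667 0.75 0.6] (approx.)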
Example #43
                                                    hop_length=hop_length,
                                                    fmax=fmax,
                                                    n_mels=n_mels)
    S = np.mean(S, axis=2)

    #S = cv2.GaussianBlur(S,ksize=(3,3),sigmaX=1,sigmaY=1)
    S_ana_f = copy.deepcopy(S)

    # normalize each frequency bin by its mean (the variance scaling is commented out)
    for row in range(S.shape[0]):
        S_ana_f[row, :] = S[row, :] / f_means[row]  # /f_vars[row]

    S_ana_gradx = cv2.Sobel(S_ana_f, cv2.CV_64F, 1, 0, ksize=5)
    S_ana_grady = cv2.Sobel(S_ana_f, cv2.CV_64F, 0, 1, ksize=5)
    S_ana_gradl = np.maximum(np.abs(S_ana_gradx), np.abs(S_ana_grady))
    S_ana_gradl = cv2.morphologyEx(S_ana_gradl, cv2.MORPH_OPEN, np.ones(
        (3, 3)))
    close_mask = cv2.morphologyEx(S_ana_gradl, cv2.MORPH_CLOSE, np.ones(
        (3, 3)))

    close_mask[close_mask <= mask_thres] = 0
    close_mask[close_mask != 0] = 1

    S_ana_log = copy.deepcopy(
        S_ana_f)  # calculate log value of ana spectrogram
    S_ana_log = S_ana_log * close_mask
    S_ana_log[S_ana_log <= 0] = float(
        'nan')  # set all values in S_ana_log that are < db_thres to NaN

    ######################################## Feature Detection ################################################################
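The gradient-masking steps above can be reproduced in isolation; a minimal sketch on random data (the threshold value is an assumption standing in for mask_thres):

import cv2
import numpy as np

S = np.random.rand(64, 64)                    # toy 2-D spectrogram slice
gx = cv2.Sobel(S, cv2.CV_64F, 1, 0, ksize=5)  # horizontal gradient
gy = cv2.Sobel(S, cv2.CV_64F, 0, 1, ksize=5)  # vertical gradient
grad = np.maximum(np.abs(gx), np.abs(gy))     # elementwise max of gradient magnitudes
grad = cv2.morphologyEx(grad, cv2.MORPH_OPEN, np.ones((3, 3)))
mask = cv2.morphologyEx(grad, cv2.MORPH_CLOSE, np.ones((3, 3)))
mask_thres = 0.5                              # hypothetical threshold
mask[mask <= mask_thres] = 0
mask[mask != 0] = 1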
Example #44
def _area_for_threshold(image, occupancy_mask, peak_rc, threshold,
                        other_fields_linear):
    '''Calculate the area of the field defined by the local maximum 'peak_rc' and
    the relative thresholding value 'threshold'

    In addition, determine:
        * is the field contiguous (good), or does it contain voids? (bad)
        * does the field extend to include other local maxima?

    1 - Based on the threshold, find the binary map of the field
    2 - Determine if there are any voids
        If there are any voids, then return an area of np.nan
    3 - Determine the co-ordinates of all cells inside the field. The area is
        given by the number of cells
        If any included cell ALSO appears in the list of 'other_fields_linear',
        then another local maximum has been included. In that case, return is_bad=True

    Returns
    -------
    area : int
        Number of cells in field OR np.nan if field contains holes
    area_linear_indices : np.ndarray
        Indices of all cells within the field if the field is valid
    is_bad : bool
        True IF the field includes a second local maximum or IF the field contains holes
    '''
    area = np.nan
    # Field is bad if it contains any other peak
    is_bad = False

    peak_value = image[peak_rc[0], peak_rc[1]]
    threshold_value = peak_value * threshold
    mask = (image >= threshold_value)

    # Mask includes all pixels above the desired threshold, including other disconnected fields
    # use morphology.label to exclude disconnected fields
    # connectivity=1 means only consider vertical/horizontal connections
    labeled_img = morphology.label(mask, connectivity=1)

    # we need to leave only one label that corresponds to the peak
    target_label = labeled_img[peak_rc[0], peak_rc[1]]
    labeled_img[labeled_img != target_label] = 0
    labeled_img[labeled_img == target_label] = 1

    #labelled_img only includes cells that are:
    #   Above the current threshold
    #   Connected (V/H) to the currently considered local maxima

    # calculate euler_number by hand rather than by regionprops
    # This yields results that are more similar to Matlab's regionprops
    # NOTE - this uses scipy.ndimage.morphology, while most else uses skimage.morphology
    filled_image = ndimage.morphology.binary_fill_holes(labeled_img)
    euler_array = (filled_image != labeled_img
                   )  # True where holes were filled in

    euler_array = np.maximum((euler_array * 1) - (occupancy_mask * 1), 0)
    # Ignore filled-in holes if it is due to the animal never visiting that location
    # Convert both arrays to integer, subtract one from the other, and replace resulting -1 values with 0
    # NOTE! np.maximum is element-wise, i.e. it returns an array. This is DIFFERENT to np.max, which returns a float.

    euler_objects = morphology.label(
        euler_array,
        connectivity=2)  # connectivity=2 : vertical, horizontal, and diagonal
    num = np.max(euler_objects)  # How many holes were filled in
    euler_number = -num + 1

    if euler_number <= 0:
        # If any holes existed, then return this
        is_bad = True
        return (area, [], is_bad)

    regions = measure.regionprops(labeled_img)
    area = np.sum(labeled_img == 1)
    area_linear_indices = np.ravel_multi_index(
        multi_index=(regions[0].coords[:, 0], regions[0].coords[:, 1]),
        dims=image.shape,
        order='F')  # co-ordinates of members of field
    if len(other_fields_linear) > 0:
        is_bad = len(
            np.intersect1d(area_linear_indices, other_fields_linear)
        ) > 0  # True if any other local maxima occur within this field

    return (area, area_linear_indices, is_bad)
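The threshold-and-label step at the heart of the function can be checked on a toy grid; a minimal sketch (skimage.measure.label is used here, equivalent to the morphology.label call above):

import numpy as np
from skimage import measure

image = np.array([[0.1, 0.9, 0.8],
                  [0.0, 0.7, 0.1],
                  [0.6, 0.0, 0.0]])
peak_rc = (0, 1)
mask = image >= image[peak_rc] * 0.5           # relative threshold at 50% of the peak
labeled = measure.label(mask, connectivity=1)  # 4-connected components
field = labeled == labeled[peak_rc]            # keep only the peak's component
print(field.sum())                             # area in cells: 3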
Example #45
def olf_bulb_10(Nmitral,H_in,W_in,P_odor_in,dam):   
#    Nmitral = 10 #number of mitral cells
    Ngranule = np.copy(Nmitral) #number of granule cells     pg. 383 of Li/Hop
    Ndim = Nmitral+Ngranule #total number of cells
    t_inh = 25    # time when inhalation starts
    t_exh = 205   # time when exhalation starts
    finalt = 395  # end time of the cycle

    #y = zeros(ndim,1);
                              
    Sx = 1.43     #Sx,Sx2,Sy,Sy2 are parameters for the activation functions
    Sx2 = 0.143
    Sy = 2.86     #These are given in Li/Hopfield pg 382, slightly diff in her thesis
    Sy2 = 0.286
    th = 1       #threshold for the activation function
    
    tau_exh = 33.3333  #Exhale time constant, pg. 382 of Li/Hop
    exh_rate = 1/tau_exh
    
    alpha = .15 #decay rate for the neurons
                                    #Li/Hop have it as 1/7 or .142 on pg 383
    
    P_odor0=np.zeros((Nmitral,1)) #odor pattern, no odor

    
    H0 = H_in #weight matrix: to mitral from granule
    W0 = W_in #weights: to granule from mitral
    
    Ib = np.ones((Nmitral,1))*.243 #initial external input to mitral cells
    Ic = np.ones((Ngranule,1))*.1 #initial input to granule cells, these values are
                                  #given on pg 382 of Li/Hop
                                  
    
    signalflag = 1 # 0 for linear output, 1 for activation function
    
    noise = np.zeros((Ndim,1)) #noise in inputs
    noiselevel =  .00143
    noisewidth = 7 #noise correlation time, given pg 383 Li/Hop as 9, but 7 in thesis
    
    lastnoise = np.zeros((Ndim,1)) #initial time of last noise pulse
    

    
    #******************************************************************************
    
    #CALCULATE FIXED POINTS
    
    #Calculating equilibrium value with no input
    rest0 = np.zeros((Ndim,1))
    
    restequi = fsolve(lambda x: equi(x,Ndim,Nmitral,Sx,Sx2,Sy,Sy2,th,alpha,\
                                     t_inh,H0,W0,P_odor0,Ib,Ic,dam),rest0) #about 20 ms to run this
    
    np.random.seed(seed=23)
    init0 = restequi+np.random.rand(Ndim)*.00143 #initial conditions plus some noise
                                           #for no odor input                 
    np.random.seed()
    #Now calculate equilibrium value with odor input
                                                
    
    lastnoise = lastnoise + t_inh - noisewidth  #initialize lastnoise value
                                                #But what is it for? to have some
                                                #kind of correlation in the noise
                                                
    #find eigenvalues of A to see if input produces oscillating signal
    
    xequi = fsolve(lambda x: equi(x,Ndim,Nmitral,Sx,Sx2,Sy,Sy2,th,alpha,\
                                     t_inh,H0,W0,P_odor_in,Ib,Ic,dam),rest0) 
                        #equilibrium values with some input, about 20 ms to run
    
    #******************************************************************************
    
    #CALCULATE A AND DETERMINE EXISTENCE OF OSCILLATIONS
    
    diffgy = celldiff(xequi[Nmitral:],Sy,Sy2,th)
    diffgx = celldiff(xequi[0:Nmitral],Sx,Sx2,th)
    
    H1 = np.dot(H0,diffgy)  
    W1 = np.dot(W0,diffgx)  #intermediate step in constructing A
    
    A = np.dot(H1,W1)   #Construct A
    
    dA,vA = lin.eig(A) #about 20 ms to run this
                        #Find eigenvalues of A
    
    diff = (1j)*(dA)**.5 - alpha   #criteria for a growing oscillation
    
    negsum = -(1j)*(dA)**.5 - alpha #Same
    
    diff_re = np.real(diff)     
                               #Take the real part
    negsum_re = np.real(negsum)
    
    #do an argmax to return the eigenvalue that will cause the fastest growing oscillations
    #Then do a spectrograph to track the growth of the associated freq through time
    
    indices = np.where(diff_re>0)   #Find the indices where the criteria is met
    indices2 = np.where(negsum_re>0)
    
    #eigenvalues that could lead to growing oscillations
#    candidates = np.append(np.real((dA[indices])**.5),np.real((dA[indices2])**.5))
    largest = np.argmax(diff_re)
    
    check = np.size(indices)
    check2 = np.size(indices2)
    
    
    if check==0 and check2==0:
    #    print("No Odor Recognized")
        dominant_freq = 0
    else:
        dominant_freq = np.real((dA[largest])**.5)/(2*np.pi) #find frequency of the dominant mode
                                                #Divide by 2pi to get to cycles/ms
    #    print("Odor detected. Eigenvalues:",dA[indices],dA[indices2],\
    #          "\nEigenvectors:",vA[indices],vA[indices2],\
    #          "\nDominant Frequency:",dominant_freq)
    
    
    #*************************************************************************
    
    #SOLVE DIFFERENTIAL EQUATIONS TO GET INPUT AND OUTPUTS AS FN'S OF t
      
        #differential equation to solve
    teval = np.r_[0:finalt]                                              
    
    #solve the differential equation
    sol = solve_ivp(lambda t,y: diffeq(t,y,Nmitral,Ngranule,Ndim,lastnoise,\
                    noise,noisewidth,noiselevel, t_inh,t_exh,exh_rate,alpha,Sy,\
                    Sy2,Sx,Sx2,th,H0,W0,P_odor_in,Ic,Ib,dam),\
                    [0,395],init0,t_eval = teval,method = 'RK45') 
    t = sol.t
    y = sol.y
    y = np.transpose(y)
    yout = np.copy(y)
    
    #convert signal into output signal given by the activation fn
    if signalflag ==1:
        for i in np.arange(np.size(t)):
            yout[i,:Nmitral] = cellout(y[i,:Nmitral],Sx,Sx2,th)
            yout[i,Nmitral:] = cellout(y[i,Nmitral:],Sy,Sy2,th)
            
    #solve diffeq for P_odor = 0
    #first, reinitialize lastnoise & noise
    noise = np.zeros((Ndim,1))
    lastnoise = np.zeros((Ndim,1))
    lastnoise = lastnoise + t_inh - noisewidth
    
    sol0 = solve_ivp(lambda t,y: diffeq(t,y,Nmitral,Ngranule,Ndim,lastnoise,\
                    noise,noisewidth,noiselevel, t_inh,t_exh,exh_rate,alpha,Sy,\
                    Sy2,Sx,Sx2,th,H0,W0,P_odor0,Ic,Ib,dam),\
                    [0,395],init0,t_eval = teval,method = 'RK45')
    y0 = sol0.y
    y0 = np.transpose(y0)
    y0out = np.copy(y0)
    
    #convert signal into output signal given by the activation fn
    if signalflag ==1:
        for i in np.arange(np.size(t)):
            y0out[i,:Nmitral] = cellout(y0[i,:Nmitral],Sx,Sx2,th)
            y0out[i,Nmitral:] = cellout(y0[i,Nmitral:],Sy,Sy2,th)
    
    
    
    #*****************************************************************************
    
    #SIGNAL PROCESSING
    
    #Filtering the signal - O_mean: Lowpass filtered signal, under 20 Hz
    #S_h: Highpass filtered signal, over 20 Hz
    
    fs = 1/(.001*(t[1]-t[0]))  #sampling freq, converting from ms to sec
    
    f_c = 15/fs     # cutoff freq (15 Hz here), written as a ratio of fc to the sampling freq
    
    flter = np.sinc(2*f_c*(t - (finalt-1)/2))*np.blackman(finalt) #creating the
                                                        #windowed sinc filter
                                                        #centered at the middle
                                                        #of the time data
    flter = flter/np.sum(flter)  #normalize
    
    hpflter = -np.copy(flter)
    hpflter[int((finalt-1)/2)] += 1  #convert the LP filter into a HP filter
    
    Sh = np.zeros(np.shape(yout))
    Sl = np.copy(Sh)
    Sl0 = np.copy(Sh)
    Sbp = np.copy(Sh)
    
    for i in np.arange(Ndim):
        Sh[:,i] = np.convolve(yout[:,i],hpflter,mode='same')
        Sl[:,i] = np.convolve(yout[:,i],flter,mode='same')
        Sl0[:,i] = np.convolve(y0out[:,i],flter,mode='same')
        
    #find the oscillation period Tosc (Tosc must be greater than 5 ms to exclude noise)
    Tosc0 = np.zeros(np.size(np.arange(5,50)))
    for i in np.arange(5,50):
        Sh_shifted=np.roll(Sh,i,axis=0)
        Tosc0[i-5] = np.sum(np.diagonal(np.dot(np.transpose(Sh[:,:Nmitral]),Sh_shifted[:,:Nmitral])))
        #That is, do the correlation matrix (time correlation), take the diagonal to
        #get the autocorrelations, and find the max
    Tosc = np.argmax(Tosc0)
    Tosc = Tosc + 5
    
    f_c2 = 1000*(1.3/Tosc)/fs  #Filter out components with frequencies higher than this
                            #to get rid of noise effects in cross-correlation
                            #times 1000 to get units right
    
    flter2 = np.sinc(2*f_c2*(t - (finalt-1)/2))*np.blackman(finalt)
    flter2 = flter2/np.sum(flter2)
    
    for i in np.arange(Ndim):
        Sbp[:,i] = np.convolve(Sh[:,i],flter2,mode='same')
    
    
    
    #CALCULATE THE DISTANCE MEASURES
    
    #calculate phase via cross-correlation with each cell
    phase = np.zeros(Nmitral)
    
    for i in np.arange(1,Nmitral):
        crosscor = signal.correlate(Sbp[:,0],Sbp[:,i])
        tdiff = np.argmax(crosscor)-(finalt-1)
        phase[i] = tdiff/Tosc * 2*np.pi
        
    #Problem with the method below is that it will only give values from 0 to pi
    #for i in np.arange(1,Nmitral):
    #    phase[i]=np.arccos(np.dot(Sbp[:,0],Sbp[:,i])/(lin.norm(Sbp[:,0])*lin.norm(Sbp[:,i])))
    
    OsciAmp = np.zeros(Nmitral)
    Oosci = np.copy(OsciAmp)*0j
    Omean = np.zeros(Nmitral)
    
    for i in np.arange(Nmitral):
        OsciAmp[i] = np.sqrt(np.sum(Sh[125:250,i]**2)/np.size(Sh[125:250,i]))
        Oosci[i] = OsciAmp[i]*np.exp(1j*phase[i])
        Omean[i] = np.average(Sl[:,i] - Sl0[:,i])
    
    Omean = np.maximum(Omean,0)
    
    Ooscibar = np.sqrt(np.dot(Oosci,np.conjugate(Oosci)))/Nmitral #can't just square b/c it's complex
    Omeanbar = np.sqrt(np.dot(Omean,Omean))/Nmitral
    

    maxlam = np.max(np.abs(np.imag(np.sqrt(dA))))    
    
        
    return yout,y0out,Sh,t,OsciAmp,Omean,Oosci,Omeanbar,Ooscibar,dominant_freq,maxlam
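The low-pass/high-pass filter construction used in the signal-processing block is a standard windowed sinc plus spectral inversion; a minimal self-contained sketch (the cutoff fraction is an arbitrary toy value):

import numpy as np

n = 395                                     # number of taps (odd, as above)
t = np.arange(n)
f_c = 0.01                                  # cutoff as a fraction of the sampling rate (assumption)
lp = np.sinc(2 * f_c * (t - (n - 1) / 2)) * np.blackman(n)
lp /= lp.sum()                              # unity gain at DC
hp = -lp.copy()
hp[(n - 1) // 2] += 1                       # spectral inversion turns the LP into an HP
x = np.random.randn(1000)
low = np.convolve(x, lp, mode='same')
high = np.convolve(x, hp, mode='same')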
Example #46
def _bin_meop(config, inVarName, outVarName):
    res = get_res(config)
    outFileName = 'meop/meop_{}_{}.nc'.format(outVarName, res)
    if os.path.exists(outFileName):
        return

    hres = get_horiz_res(config)
    dz = config.getfloat('grid', 'dzExtrap')
    nz = config.getint('grid', 'nzExtrap')
    zOut = dz*numpy.arange(nz+1)
    z = 0.5*(zOut[0:-1] + zOut[1:])
    z_bnds = numpy.zeros((len(z), 2))
    z_bnds[:, 0] = zOut[0:-1]
    z_bnds[:, 1] = zOut[1:]

    ds = xarray.open_dataset('ismip6/{}_grid.nc'.format(hres))
    ds['z'] = (('z',), z)
    ds.z.attrs['units'] = 'meters'
    ds.z.attrs['bounds'] = 'z_bnds'
    ds.z.attrs['standard_name'] = 'depth'
    ds.z.attrs['positive'] = 'up'
    ds.z.attrs['axis'] = 'Z'

    ds['z_bnds'] = (('z', 'nbounds'), z_bnds)
    ds.z_bnds.attrs['comment'] = 'depth bounds'

    xMin = ds.x[0].values
    yMin = ds.y[0].values
    zMin = z[0]
    dx = ds.x[1].values - ds.x[0].values

    nx = ds.sizes['x']
    ny = ds.sizes['y']
    nz = ds.sizes['z']

    outField = numpy.zeros((nz, ny, nx))
    entryCount = numpy.zeros((nz, ny, nx), dtype=int)

    attrs = None

    proj = get_antarctic_stereographic_projection()

    fileList = sorted(glob.glob('meop/MEOP-CTD_2018-04-10/*/DATA_ncARGO/*.nc'))
    print('  Binning MEOP {} profiles...'.format(outVarName))

    widgets = ['  ', progressbar.Percentage(), ' ',
               progressbar.Bar(), ' ', progressbar.ETA()]
    bar = progressbar.ProgressBar(widgets=widgets,
                                  maxval=len(fileList)).start()

    for index, fileName in enumerate(fileList):
        dsProfile = xarray.open_dataset(fileName)
        lat = dsProfile.LATITUDE.values
        lon = dsProfile.LONGITUDE.values
        inField = dsProfile['{}_ADJUSTED'.format(inVarName)].values
        quality = dsProfile['{}_ADJUSTED_QC'.format(inVarName)].values
        if attrs is None:
            attrs = dsProfile[inVarName].attrs
        x, y = proj(lon, lat)
        pressure = dsProfile.PRES.values

        lat = numpy.maximum(lat, -75.)
        for profile in range(pressure.shape[0]):
            xBin = int((x[profile]-xMin)/dx)
            yBin = int((y[profile]-yMin)/dx)
            if xBin < 0 or xBin >= nx:
                continue
            if yBin < 0 or yBin >= ny:
                continue
            for level in range(pressure.shape[1]):
                if quality[profile, level] != b'1':
                    continue
                press = pressure[profile, level]
                if numpy.isnan(press):
                    continue
                depth = gsw.z_from_p(pressure[profile, level], lat[profile])
                zBin = int((depth-zMin)/dz)
                if zBin < 0 or zBin >= nz:
                    continue
                outField[zBin, yBin, xBin] += inField[profile, level]
                entryCount[zBin, yBin, xBin] += 1
        bar.update(index+1)
    bar.finish()
    mask = entryCount > 0
    outField[mask] /= entryCount[mask]
    outField[numpy.logical_not(mask)] = numpy.nan
    ds[outVarName] = (('z', 'y', 'x'), outField)
    for attr in ['units', 'long_name', 'comment']:
        ds[outVarName].attrs[attr] = attrs[attr]
    ds.to_netcdf(outFileName)
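The accumulate-then-average pattern used for the bins above can be written compactly with np.add.at; a minimal sketch with toy samples (variable names are illustrative):

import numpy as np

values = np.array([1.0, 2.0, 4.0, 8.0])
bins = np.array([0, 1, 1, 2])          # target bin of each sample
acc = np.zeros(3)
count = np.zeros(3, dtype=int)
np.add.at(acc, bins, values)           # unbuffered in-place accumulation
np.add.at(count, bins, 1)
mean = np.full(3, np.nan)              # empty bins stay NaN, as above
mask = count > 0
mean[mask] = acc[mask] / count[mask]
print(mean)  # [1. 3. 8.]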
Example #47
def relu(x):
    '''Vectorized RELU'''
    return np.maximum(0, x)
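A quick check of the helper, assuming relu from the example above is in scope:

import numpy as np

print(relu(np.array([-2.0, -0.5, 0.0, 1.5])))  # [0.  0.  0.  1.5]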
Example #48
def calc_flow(depth_src,
              pose_src,
              pose_tgt,
              K,
              depth_tgt,
              thresh=3e-3,
              standard_rep=False):
    """
    project the points in source corrd to target corrd
    :param standard_rep:
    :param depth_src: depth image of source(m)
    :param pose_src: pose matrix of soucre, [R|T], 3x4
    :param depth_tgt: depth image of target
    :param pose_tgt: pose matrix of target, [R|T], 3x4
    :param K: intrinsic_matrix
    :param depth_tgt: depth image of target(m)
    :return: visible: whether points in source can be viewed in target
    :return: flow: flow from source to target
    """
    height = depth_src.shape[0]
    width = depth_src.shape[1]
    visible = np.zeros(depth_src.shape[:2]).flatten()
    X = backproject_camera(depth_src, intrinsic_matrix=K)
    transform = np.matmul(K, se3_mul(pose_tgt, se3_inverse(pose_src)))
    Xp = np.matmul(
        transform,
        np.append(X, np.ones([1, X.shape[1]], dtype=np.float32), axis=0))

    pz = Xp[2] + 1e-15
    pw = Xp[0] / pz
    ph = Xp[1] / pz

    valid_points = np.where(depth_src.flatten() != 0)[0]
    depth_proj_valid = pz[valid_points]
    pw_valid_raw = np.round(pw[valid_points]).astype(int)
    pw_valid = np.minimum(np.maximum(pw_valid_raw, 0), width - 1)
    ph_valid_raw = np.round(ph[valid_points]).astype(int)
    ph_valid = np.minimum(np.maximum(ph_valid_raw, 0), height - 1)
    p_within = np.logical_and(
        np.logical_and(pw_valid_raw >= 0, pw_valid_raw < width),
        np.logical_and(ph_valid_raw >= 0, ph_valid_raw < height),
    )

    depth_tgt_valid = depth_tgt[ph_valid, pw_valid]

    p_within = np.logical_and(
        p_within,
        np.abs(depth_tgt_valid - depth_proj_valid) < thresh)
    p_valid = np.abs(depth_tgt_valid) > 1e-10
    fg_points = valid_points[np.logical_and(p_within, p_valid)]
    visible[fg_points] = 1
    visible = visible.reshape(depth_src.shape[:2])
    w_ori, h_ori = np.meshgrid(np.linspace(0, width - 1, width),
                               np.linspace(0, height - 1, height))
    if standard_rep:
        flow = np.dstack([
            pw.reshape(depth_src.shape[:2]) - w_ori,
            ph.reshape(depth_src.shape[:2]) - h_ori
        ])
    else:
        # deprecated version, only used in old code
        flow = np.dstack([
            ph.reshape(depth_src.shape[:2]) - h_ori,
            pw.reshape(depth_src.shape[:2]) - w_ori
        ])
    flow[np.dstack([visible, visible]) != 1] = 0
    assert np.isnan(flow).sum() == 0
    X_valid = np.array([c[np.where(visible.flatten())] for c in X])
    return flow, visible, X_valid
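The clamp-and-mask treatment of the projected pixel coordinates can be isolated; a minimal sketch with toy values:

import numpy as np

width = 640
pw_raw = np.array([-3, 10, 700])                      # toy raw column indices
pw = np.minimum(np.maximum(pw_raw, 0), width - 1)     # clamped for safe indexing
within = np.logical_and(pw_raw >= 0, pw_raw < width)  # validity of the raw projection
print(pw)      # [  0  10 639]
print(within)  # [False  True False]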
Example #49
    def fwph_main(self):
        self.t0 = time.time()
        best_bound = self.fw_prep()

        # FWPH takes some time to initialize
        # If run as a spoke, check for convergence here
        if self.spcomm and self.spcomm.is_converged():
            return None, None, None

        # The body of the algorithm
        for itr in range(self.options['PHIterLimit']):
            self._PHIter = itr
            self._local_bound = 0
            for name in self.local_subproblems:
                dual_bound = self.SDM(name)
                self._local_bound += self.local_subproblems[name]._mpisppy_probability * \
                                     dual_bound
            self._compute_dual_bound()
            if (self.is_minimizing):
                best_bound = np.maximum(best_bound, self._local_bound)
            else:
                best_bound = np.minimum(best_bound, self._local_bound)

            ## Hubs/spokes take precedence over convergers
            if self.spcomm:
                if self.spcomm.is_converged():
                    secs = time.time() - self.t0
                    self._output(itr+1, self._local_bound, 
                                 best_bound, np.nan, secs)
                    if (self.cylinder_rank == 0 and self.vb):
                        print('FWPH converged to user-specified criteria')
                    break
                self.spcomm.sync()
            if (self.PH_converger):
                self.Compute_Xbar(self.options['verbose'])
                diff = self.convobject.convergence_value()
                if (self.convobject.is_converged()):
                    secs = time.time() - self.t0
                    self._output(itr+1, self._local_bound, 
                                 best_bound, diff, secs)
                    if (self.cylinder_rank == 0 and self.vb):
                        print('FWPH converged to user-specified criteria')
                    break
            else: # Convergence check from Boland
                diff = self._conv_diff()
                self.Compute_Xbar(self.options['verbose'])
                if (diff < self.options['convthresh']):
                    secs = time.time() - self.t0
                    self._output(itr+1, self._local_bound, 
                                 best_bound, diff, secs)
                    if (self.cylinder_rank == 0 and self.vb):
                        print('PH converged based on standard criteria')
                    break

            secs = time.time() - self.t0
            self._output(itr+1, self._local_bound, best_bound, diff, secs)
            self.Update_W(self.options['verbose'])
            if self._is_timed_out():
                if (self.cylinder_rank == 0 and self.vb):
                    print('Timeout.')
                break

        self._swap_nonant_vars_back()
        weight_dict = self._gather_weight_dict() # None if rank != 0
        xbars_dict  = self._get_xbars() # None if rank != 0
        return itr+1, weight_dict, xbars_dict
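Note that np.maximum/np.minimum also accept scalars, which is how the best-bound update above works; a toy illustration (values made up):

import numpy as np

best_bound = -np.inf
for local_bound in [2.7, 3.2, 3.0]:                   # toy per-iteration dual bounds
    best_bound = np.maximum(best_bound, local_bound)  # running max, as in the maximizing branch
print(best_bound)  # 3.2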
Example #50
	def fetch(self, next_chunk_set, seg_idx, chunk_idx, take_action, num_chunk, playing_speed = 1.0):
		# Action initialization
		# print "start fetching, seg idx is:", seg_idx
		start_state = self.state
		chunk_size = next_chunk_set # in Kbits not KBytes
		chunk_start_time = seg_idx * self.seg_duration + chunk_idx * self.chunk_duration
		# as mpd is based on prediction, there is noise
		# chunk_size = np.random.uniform(CHUNK_RANDOM_RATIO_LOW*chunk_size, CHUNK_RANDOM_RATIO_HIGH*chunk_size)
		chunk_sent = 0.0	# in Kbits
		downloading_fraction = 0.0	# in ms
		freezing_fraction = 0.0	# in ms
		time_out = 0
		rtt = 0.0
		# Handle RTT 
		if take_action:
			rtt = np.random.uniform(RTT_LOW, RTT_HIGH) 	# in ms
			# rtt = RTT_LOW	# For upper bound calculation
			duration = self.time_trace[self.time_idx] * MS_IN_S - self.last_trace_time	# in ms
			if duration > rtt:
				self.last_trace_time += rtt
			else:

				temp_rtt = rtt
				while duration < temp_rtt:
					self.last_trace_time = self.time_trace[self.time_idx] * MS_IN_S
					self.time_idx += 1
					if self.time_idx >= len(self.time_trace):
						self.time_idx = 1
						self.last_trace_time = 0.0
					temp_rtt -= duration
					duration = self.time_trace[self.time_idx] * MS_IN_S - self.last_trace_time
				self.last_trace_time += temp_rtt

				# temp_rtt = rtt - duration
				# self.last_trace_time = self.time_trace[self.time_idx] * MS_IN_S	# in ms
				# self.time_idx += 1
				# if self.time_idx >= len(self.time_trace):
				# 	self.time_idx = 1
				# 	self.last_trace_time = 0.0
				# self.last_trace_time += temp_rtt
				assert self.last_trace_time < self.time_trace[self.time_idx] * MS_IN_S
			downloading_fraction += rtt
			assert self.state == 1 or self.state == 0
			# Check whether during startup
			if self.state == 1:
				self.playing_time += np.minimum(self.buffer, playing_speed*rtt)			# modified based on playing speed, adjusted, * speed
				freezing_fraction += np.maximum(rtt - self.buffer/playing_speed, 0.0)	# modified based on playing speed, real time, /speed
				self.buffer = np.maximum(0.0, self.buffer - playing_speed*rtt)			# modified based on playing speed, adjusted, * speed
				# check whether we enter freezing
				if freezing_fraction > 0.0:
					self.state = 2
			else:
				freezing_fraction += rtt 	# in ms
		# Chunk downloading
		while True:
			throughput = self.throughput_trace[self.time_idx]	# in Mbps or Kbpms
			duration = self.time_trace[self.time_idx] * MS_IN_S - self.last_trace_time		# in ms
			deliverable_size = throughput * duration * PACKET_PAYLOAD_PORTION	# in Kbits		
			# Will also check whether freezing time exceeds the TOL
			if deliverable_size + chunk_sent > chunk_size:
				fraction = (chunk_size - chunk_sent) / (throughput * PACKET_PAYLOAD_PORTION)	# in ms, real time
				if self.state == 1:
					assert freezing_fraction == 0.0
					temp_freezing = np.maximum(fraction - self.buffer/playing_speed, 0.0)		# modified based on playing speed
					if temp_freezing > self.latency_tol:
						# should not happen
						time_out = 1
						self.last_trace_time += self.buffer/playing_speed + self.freezing_tol
						downloading_fraction += self.buffer/playing_speed + self.freezing_tol
						self.playing_time += self.buffer
						chunk_sent += (self.freezing_tol + self.buffer/playing_speed) * throughput * PACKET_PAYLOAD_PORTION	# in Kbits	
						self.state = 0
						self.buffer = 0.0
						assert chunk_sent < chunk_size
						return chunk_sent, downloading_fraction, freezing_fraction, time_out, start_state, rtt

					downloading_fraction += fraction
					self.last_trace_time += fraction
					freezing_fraction += np.maximum(fraction - self.buffer/playing_speed, 0.0)	# modified based on playing speed 
					self.playing_time += np.minimum(self.buffer, playing_speed*fraction)		# modified based on playing speed 
					self.buffer = np.maximum(self.buffer - playing_speed*fraction, 0.0)			# modified based on playing speed 
					if np.round(self.playing_time + self.buffer, 2) == np.round(chunk_start_time, 2):
						self.buffer += self.chunk_duration * num_chunk
					else:
					# Should not happen in the normal case; this is a constraint for training
						self.buffer = self.chunk_duration * num_chunk
						self.playing_time = chunk_start_time
					break
				# Freezing
				elif self.state == 2:
					assert self.buffer == 0.0
					if freezing_fraction + fraction > self.freezing_tol:
						time_out = 1
						self.last_trace_time += self.freezing_tol - freezing_fraction
						downloading_fraction += self.freezing_tol - freezing_fraction
						chunk_sent += (self.freezing_tol - freezing_fraction) * throughput * PACKET_PAYLOAD_PORTION	# in Kbits
						freezing_fraction = self.freezing_tol
						self.state = 0
						assert chunk_sent < chunk_size
						return chunk_sent, downloading_fraction, freezing_fraction, time_out, start_state, rtt
					freezing_fraction += fraction
					self.last_trace_time += fraction
					downloading_fraction += fraction
					self.buffer += self.chunk_duration * num_chunk
					self.playing_time = chunk_start_time
					self.state = 1
					break

				else:
					assert self.buffer < self.start_up_th
					# if freezing_fraction + fraction > self.freezing_tol:
					# 	self.buffer = 0.0
					# 	time_out = 1
					# 	self.last_trace_time += self.freezing_tol - freezing_fraction	# in ms
					# 	downloading_fraction += self.freezing_tol - freezing_fraction
					# 	chunk_sent += (self.freezing_tol - freezing_fraction) * throughput * PACKET_PAYLOAD_PORTION	# in Kbits
					# 	freezing_fraction = self.freezing_tol
					# 	# Download is not finished, chunk_size is not the entire chunk
					# 	# print()
					# 	assert chunk_sent < chunk_size
					# 	return chunk_sent, downloading_fraction, freezing_fraction, time_out, start_state
					downloading_fraction += fraction
					self.buffer += self.chunk_duration * num_chunk
					freezing_fraction += fraction
					self.last_trace_time += fraction
					if self.buffer >= self.start_up_th:
						# Because it might happen after one long freezing (not exceed freezing tol)
						# And resync, enter initial phase
						buffer_end_time = chunk_start_time + self.chunk_duration * num_chunk
						self.playing_time = buffer_end_time - self.buffer
						# print buffer_end_time, self.buffer, " This is playing time"
						self.state = 1
					break

			# One chunk downloading does not finish
			# tracing
			if self.state == 1:
				assert freezing_fraction == 0.0
				temp_freezing = np.maximum(duration - self.buffer/playing_speed, 0.0)		# modified based on playing speed
				self.playing_time += np.minimum(self.buffer, playing_speed*duration)		# modified based on playing speed
				# Freezing time exceeds tolerance
				if temp_freezing > self.freezing_tol:
					# should not happen
					time_out = 1
					self.last_trace_time += self.freezing_tol + self.buffer/playing_speed
					downloading_fraction += self.freezing_tol + self.buffer/playing_speed
					chunk_sent += (self.freezing_tol + self.buffer/playing_speed) * throughput * PACKET_PAYLOAD_PORTION	# in Kbits; computed before the buffer is reset below, matching the analogous branch above
					freezing_fraction = self.freezing_tol
					self.playing_time += self.buffer
					self.buffer = 0.0
					# exceed TOL, enter startup, freezing time equals TOL
					self.state = 0
					assert chunk_sent < chunk_size
					return chunk_sent, downloading_fraction, freezing_fraction, time_out, start_state, rtt

				chunk_sent += duration * throughput * PACKET_PAYLOAD_PORTION	# in Kbits
				downloading_fraction += duration 	# in ms
				self.last_trace_time = self.time_trace[self.time_idx] * MS_IN_S	# in ms
				self.time_idx += 1
				if self.time_idx >= len(self.time_trace):
					self.time_idx = 1
					self.last_trace_time = 0.0	# in ms
				self.buffer = np.maximum(self.buffer - playing_speed*duration, 0.0)			# modified based on playing speed
				# update buffer and state
				if temp_freezing > 0:
					# enter freezing
					self.state = 2
					assert self.buffer == 0.0
					freezing_fraction += temp_freezing

			# Freezing during trace
			elif self.state == 2:
				assert self.buffer == 0.0
				if duration + freezing_fraction > self.freezing_tol:
					time_out = 1
					self.last_trace_time += self.freezing_tol - freezing_fraction	# in ms
					self.state = 0
					downloading_fraction += self.freezing_tol - freezing_fraction
					chunk_sent += (self.freezing_tol - freezing_fraction) * throughput * PACKET_PAYLOAD_PORTION	# in Kbits
					freezing_fraction = self.freezing_tol
					# Download is not finished, chunk_size is not the entire chunk
					assert chunk_sent < chunk_size
					return chunk_sent, downloading_fraction, freezing_fraction, time_out, start_state, rtt

				freezing_fraction += duration 	# in ms
				chunk_sent += duration * throughput * PACKET_PAYLOAD_PORTION	# in kbits
				downloading_fraction += duration 	# in ms
				self.last_trace_time = self.time_trace[self.time_idx] * MS_IN_S	# in ms
				self.time_idx += 1
				if self.time_idx >= len(self.time_trace):
					self.time_idx = 1
					self.last_trace_time = 0.0	# in ms
			# Startup
			else:
				assert self.buffer < self.start_up_th
				# if freezing_fraction + duration > self.freezing_tol:
				# 	self.buffer = 0.0
				# 	time_out = 1
				# 	self.last_trace_time += self.freezing_tol - freezing_fraction	# in ms
				# 	downloading_fraction += self.freezing_tol - freezing_fraction
				# 	chunk_sent += (self.freezing_tol - freezing_fraction) * throughput * PACKET_PAYLOAD_PORTION	# in Kbits
				# 	freezing_fraction = self.freezing_tol
				# 	# Download is not finished, chunk_size is not the entire chunk
				# 	assert chunk_sent < chunk_size
				# 	return chunk_sent, downloading_fraction, freezing_fraction, time_out, start_state
				chunk_sent += duration * throughput * PACKET_PAYLOAD_PORTION
				downloading_fraction += duration
				self.last_trace_time = self.time_trace[self.time_idx] * MS_IN_S	# in ms
				self.time_idx += 1
				if self.time_idx >= len(self.time_trace):
					self.time_idx = 1
					self.last_trace_time = 0.0	# in ms
				freezing_fraction += duration
		# Finish downloading
		# if self.buffer > BUFFER_TH:
		# 	# Buffer is too long, need sleep
		# 	sleep = np.ceil((self.buffer - BUFFER_TH)/SLEEP_STEP) * SLEEP_STEP
		# 	self.buffer -= sleep
		# 	temp_sleep = sleep
		# 	while True:
		# 		duration = self.time_trace[self.time_idx] * MS_IN_S - self.last_trace_time
		# 		if duration > temp_sleep:
		# 			self.last_trace_time += temp_sleep
		# 			break
		# 		temp_sleep -= duration
		# 		self.last_trace_time = self.time_trace[self.time_idx]
		# 		self.time_idx += 1
		# 		if self.time_idx >= len(self.time_trace):
		# 			self.time_idx = 1
		# 			self.last_trace_time = 0.0
		# 	assert self.state == 1
		return chunk_size, downloading_fraction, freezing_fraction, time_out, start_state, rtt
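The buffer/freezing arithmetic that recurs throughout fetch follows one pattern; a minimal sketch for a single trace interval (toy values, all in ms):

import numpy as np

buffer_ms = 800.0
elapsed_ms = 1000.0
speed = 1.0
played = np.minimum(buffer_ms, speed * elapsed_ms)        # playback consumes buffer
frozen = np.maximum(elapsed_ms - buffer_ms / speed, 0.0)  # leftover time is freezing
buffer_ms = np.maximum(buffer_ms - speed * elapsed_ms, 0.0)
print(played, frozen, buffer_ms)  # 800.0 200.0 0.0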
Example #51
    def _do_agent_eval(self, vcocodb, detections_file, ovr_thresh=0.5):

        output_txt = detections_file[:len(detections_file) - 4] + '.txt'

        with open(detections_file, 'rb') as f:
            dets = pickle.load(f)

        tp = [[] for a in range(self.num_actions)]
        fp = [[] for a in range(self.num_actions)]
        sc = [[] for a in range(self.num_actions)]

        npos = np.zeros((self.num_actions), dtype=np.float32)

        for i in range(len(vcocodb)):
            image_id = vcocodb[i][
                'id']  # img ID, not the full name (e.g. id= 165, 'file_name' = COCO_train2014_000000000165.jpg )
            gt_inds = np.where(vcocodb[i]['gt_classes'] == 1)[
                0]  # index of the person's box among all object boxes
            # person boxes
            gt_boxes = vcocodb[i]['boxes'][
                gt_inds]  # all person's boxes in this image
            gt_actions = vcocodb[i]['gt_actions'][
                gt_inds]  # index of Nx26 binary matrix indicating the actions
            # some person instances don't have annotated actions
            # we ignore those instances
            ignore = np.any(gt_actions == -1, axis=1)

            for aid in range(self.num_actions):
                npos[aid] += np.sum(
                    gt_actions[:, aid] == 1
                )  # how many actions are involved in this image(for all the human)

            pred_agents, _ = self._collect_detections_for_image(dets, image_id)
            # For each image, we have a pred_agents. For example, there are 2 people detected, then pred_agents is a 2x(4+26) matrix. Each row stands for a human, 0-3 human box, 4-25 the score for each action.

            for aid in range(self.num_actions):

                # keep track of detected instances for each action
                covered = np.zeros((gt_boxes.shape[0]), dtype=bool)  # one flag per person in this image; bool replaces the removed np.bool alias

                agent_scores = pred_agents[:, 4 + aid]  # score of this action for all people in this image
                agent_boxes = pred_agents[:, :4]  # predicted human box for all people in this image
                # remove NaNs
                # If only use agent, there should be no NAN cause there is no object information provided. Just give a agent score.
                valid = np.where(np.isnan(agent_scores) == False)[0]
                agent_scores = agent_scores[valid]
                agent_boxes = agent_boxes[valid, :]

                # sort in descending order
                idx = agent_scores.argsort()[::-1]  # For this action, sort the scores of all people. An action can be done by many people.

                for j in idx:  # Each predicted person
                    pred_box = agent_boxes[j, :]  # It's predicted human box
                    overlaps = get_overlap(
                        gt_boxes, pred_box
                    )  # overlap between this predict human and all human gt_boxes

                    jmax = overlaps.argmax(
                    )  # Find the idx of gt human box that matches this predicted human
                    ovmax = overlaps.max()

                    # if matched with an instance with no annotations
                    # continue
                    if ignore[jmax]:
                        continue

                    is_true_action = (
                        gt_actions[jmax, aid] == 1
                    )  # Is this person actually doing this action according to gt?

                    sc[aid].append(
                        agent_scores[j]
                    )  # The predicted score of this person doing this action. In descending order.
                    if is_true_action and (ovmax >= ovr_thresh):  # box IoU clears the threshold and this person is doing this action
                        if covered[jmax]:
                            fp[aid].append(1)
                            tp[aid].append(0)
                        else:  # first time see this gt human
                            fp[aid].append(0)
                            tp[aid].append(1)
                            covered[jmax] = True
                    else:
                        fp[aid].append(1)
                        tp[aid].append(0)

        # compute ap for each action
        agent_ap = np.zeros((self.num_actions), dtype=np.float32)
        for aid in range(self.num_actions):
            a_fp = np.array(fp[aid], dtype=np.float32)
            a_tp = np.array(tp[aid], dtype=np.float32)
            a_sc = np.array(sc[aid], dtype=np.float32)
            # sort in descending score order
            idx = a_sc.argsort()[::-1]  # For each action, sort the scores of all predicted people in all images
            a_fp = a_fp[idx]
            a_tp = a_tp[idx]
            a_sc = a_sc[idx]

            a_fp = np.cumsum(a_fp)
            a_tp = np.cumsum(a_tp)
            rec = a_tp / float(npos[aid])
            # check

            assert (np.amax(rec) <= 1)
            prec = a_tp / np.maximum(a_tp + a_fp, np.finfo(np.float64).eps)
            agent_ap[aid] = voc_ap(rec, prec)

        f = open(output_txt, 'w')
        print('---------Reporting Agent AP (%)------------------')
        f.write('---------Reporting Agent AP (%)------------------\n')
        for aid in range(self.num_actions):
            info = '{: >20}: AP = {:0.2f} (#pos = {:d})'.format(
                self.actions[aid], agent_ap[aid] * 100.0, int(npos[aid]))
            print(info)
            f.write(info)
            f.write('\n')
        info = 'Average Agent AP = %.2f' % (np.nansum(agent_ap) * 100.00 /
                                            self.num_actions)
        print(info)
        f.write(info)
        f.write('\n')
        print('---------------------------------------------')
        f.write('---------------------------------------------\n')
        f.close()
Example #52
	def compute_cell_information(self, obj_model_dict):

		cached_information = dict()

		# First we obtain a sample from the Pareto Frontier of NUM_POINTS_FRONTIER

		moop = MOOP(obj_model_dict, obj_model_dict, self.input_space, False)
		
		grid = sobol_grid.generate(self.input_space.num_dims, self.input_space.num_dims * GRID_SIZE)

		if USE_GRID_ONLY == True:

			moop.solve_using_grid(grid)

			for obj in obj_model_dict:
				result = self.find_optimum_gp(obj_model_dict[ obj ], grid)
				moop.append_to_population(result)
		else:

			assert NSGA_POP > len(obj_model_dict.keys()) + 1

			moop.solve_using_grid(grid)

			for obj in obj_model_dict:
				result = self.find_optimum_gp(obj_model_dict[ obj ], grid)
				moop.append_to_population(result)

			pareto_set = moop.compute_pareto_front_and_set_summary(NSGA_POP)['pareto_set']

			moop.initialize_population(np.maximum(NSGA_POP - pareto_set.shape[ 0 ], 0))

			for i in range(pareto_set.shape[ 0 ]):
				moop.append_to_population(pareto_set[ i, : ])

			moop.evolve_population_only(NSGA_EPOCHS)

			for i in range(pareto_set.shape[ 0 ]):
				moop.append_to_population(pareto_set[ i, : ])

		result = moop.compute_pareto_front_and_set_summary(NUM_POINTS_FRONTIER)

		print('Inner multi-objective problem solved!')

		means_objectives = np.zeros((obj_model_dict[ next(iter(obj_model_dict)) ].inputs.shape[ 0 ], len(obj_model_dict)))

		k = 0
		for obj in obj_model_dict:
			means_objectives[ :, k ] = obj_model_dict[ obj ].predict(obj_model_dict[ obj ].inputs)[ 0 ]
			k += 1

		v_inf = np.ones((1, len(obj_model_dict))) * np.inf
		v_ref = np.ones((1, len(obj_model_dict))) * 1e3

		# We add the non-dominated prediction and the observed inputs to the frontier

		frontier = result['frontier']
		frontier = np.vstack((frontier, means_objectives))
		frontier = frontier[ _cull_algorithm(frontier), ]

		# We remove repeated entries from the pareto front

		X = frontier[ 0 : 1, : ]

		for i in range(frontier.shape[ 0 ]):
			if np.min(cdist(frontier[ i : (i + 1), : ], X)) > 1e-8:
				X = np.vstack((X, frontier[ i, ]))

		frontier = X

		cached_information['frontier'] = frontier

		# We sort the entries in the pareto frontier

		frontier_sorted = np.vstack((-v_inf, cached_information['frontier'], v_ref, v_inf))

		for i in range(len(obj_model_dict)):
			frontier_sorted[ :, i ] = np.sort(frontier_sorted[ :, i ])

		# Now we build the info associated to each cell

		n_repeat = (frontier_sorted.shape[ 0 ] - 2) ** frontier_sorted.shape[ 1 ]

		cached_information['cells'] = dict()

		added_cells = 0
		for i in range(n_repeat):

			cell = dict()

			indices = np.zeros(len(obj_model_dict)).astype(int)

			j = i

			for k in range(len(obj_model_dict)):
				indices[ k ] = int(j % (frontier_sorted.shape[ 0 ] - 2))
				j = np.floor(j / (frontier_sorted.shape[ 0 ] - 2))

			u = np.zeros(len(obj_model_dict))

			for k in range(len(obj_model_dict)):
				u[ k ] = frontier_sorted[ int(indices[ k ] + 1), k ]
			
			l = np.zeros(len(obj_model_dict))
				
			for k in range(len(obj_model_dict)):
				l[ k ] = frontier_sorted[ indices[ k ], k ]

			# If the cell is dominated we discard it

			is_dominated = False
			for k in range(frontier.shape[ 0 ]):
				if np.all(l >= frontier[ k, : ]):
					is_dominated = True

			if is_dominated:
				continue

			# We find the vector v

			v = np.zeros(len(obj_model_dict))

			for k in range(len(obj_model_dict)):

				l_tmp = np.copy(l)

				for j in range(int(frontier_sorted.shape[ 0 ] - indices[ k ] - 1)):
					l_tmp[ k ] = frontier_sorted[ indices[ k ] + j, k ]

					dominates_all = True
					for h in range(frontier.shape[ 0 ]):
						if np.all(frontier[ h, : ] <= l_tmp):
							dominates_all = False
							break

					if dominates_all == False:
						break
					
				if dominates_all == False:
					v[ k ] = l_tmp[ k ]
				else:
					v[ k ] = v_ref[ 0, k ]

			# We compute the quantities required for evaluating the gain in hyper-volume

			# We find the points dominated by u

			dominated_by_u = frontier
			h = 0
			while (h < dominated_by_u.shape[ 0 ]):
				if (not np.any(u < dominated_by_u[ h, : ])) and (not np.all(u == dominated_by_u[ h, : ])):
					dominated_by_u = np.delete(dominated_by_u, (h), axis = 0)
				else:
					h+= 1

			# The value of minusQ2plusQ3 is given by the hypervolume of the dominated points with reference v

			if dominated_by_u.shape[ 0 ] == 0:
				minusQ2plusQ3 = 0.0
			else:
				hv = HyperVolume(v.tolist())
				minusQ2plusQ3 = -hv.compute(dominated_by_u.tolist())
			
			cell['u'] = u
			cell['l'] = l
			cell['v'] = v
			cell['dominated_by_u'] = dominated_by_u
			cell['minusQ2plusQ3'] = minusQ2plusQ3
			
			cached_information['cells'][ str(added_cells) ] = cell
			added_cells += 1
			
		n_cells = added_cells

		cached_information['n_cells'] = n_cells
		cached_information['v_ref'] = v_ref[ 0, : ]
		cached_information['n_objectives'] = len(obj_model_dict)
#		self.print_cell_info(cached_information)

		return cached_information
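The non-dominated filtering performed by _cull_algorithm can be sketched independently; a minimal version assuming minimization of every objective (this helper is hypothetical, not the original implementation):

import numpy as np

def non_dominated(points):
    # keep[i] stays True only if no other point dominates points[i]
    keep = np.ones(len(points), dtype=bool)
    for i, p in enumerate(points):
        if keep[i]:
            dominated = np.all(p <= points, axis=1) & np.any(p < points, axis=1)
            keep &= ~dominated
    return keep

pts = np.array([[1., 2.], [2., 1.], [2., 2.]])
print(non_dominated(pts))  # [ True  True False]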
Example #53
    def limLog(self, x):
        # NOTE: a literal like 1e-1000 underflows to 0.0 as a Python float,
        # so the guard would still return -inf for x == 0; use a representable minimum
        MINLOG = 1e-300
        return np.log(np.maximum(x, MINLOG))
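A standalone version of the same guard, assuming a plain function rather than a method:

import numpy as np

def lim_log(x, min_value=1e-300):
    # clamp before the log so exact zeros give a large negative number, not -inf
    return np.log(np.maximum(x, min_value))

print(lim_log(np.array([0.0, 1.0])))  # [-690.7755279    0.       ]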
Example #54
def clip_xyxy_to_image(x1, y1, x2, y2, height, width):
    x1 = np.minimum(width - 1., np.maximum(0., x1))
    y1 = np.minimum(height - 1., np.maximum(0., y1))
    x2 = np.minimum(width - 1., np.maximum(0., x2))
    y2 = np.minimum(height - 1., np.maximum(0., y2))
    return x1, y1, x2, y2
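A quick check of the helper, assuming clip_xyxy_to_image from the example above is in scope:

x1, y1, x2, y2 = clip_xyxy_to_image(-5., 10., 700., 500., height=480, width=640)
print(x1, y1, x2, y2)  # 0.0 10.0 639.0 479.0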
Example #55
    def fit(self, X, y):
        """Fit model according to X and y.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : classifier
            Returns self.
        """
        rs = self._get_random_state()

        # Create dataset
        ds = get_dataset(X, order="fortran")
        n_samples = ds.get_n_samples()
        n_features = ds.get_n_features()

        if self.penalty != "l1/l2" and self.multiclass:
            raise NotImplementedError(
                "True multiclass options not implemented "
                "for non group-lasso(l1/l2) penalties.")

        # Create label transformers
        #neg_label = 0 if self.penalty == "nn" else -1
        reencode = self.penalty == "l1/l2"
        y, n_classes, n_vectors = self._set_label_transformers(y,
                                                               reencode,
                                                               neg_label=-1)
        Y = np.asfortranarray(self.label_binarizer_.transform(y),
                              dtype=np.float64)

        # Initialize coefficients
        if not self.warm_start or self.coef_ is None:
            self.C_init = self.C
            self.coef_ = np.zeros((n_vectors, n_features), dtype=np.float64)
            self._init_errors(Y)

        self.intercept_ = np.zeros(n_vectors, dtype=np.float64)
        indices = np.arange(n_features, dtype=np.int32)

        max_steps = self._get_max_steps()

        # Learning
        if self.penalty == "l1/l2":
            tol = self.tol
            #n_min = np.min(np.sum(Y == 1, axis=0))
            #tol *= max(n_min, 1) / n_samples

            vinit = self.violation_init_.get(0, 0) * self.C / self.C_init
            model = _primal_cd(self, self.coef_, self.errors_, ds, y, Y,
                               -1, self.multiclass, indices, 12,
                               self._get_loss(), self.selection, self.permute,
                               self.termination, self.C, self.alpha,
                               self.max_iter, max_steps, self.shrinking, vinit,
                               rs, tol, self.callback, self.n_calls,
                               self.verbose)
            viol = model[0]
            if self.warm_start and len(self.violation_init_) == 0:
                self.violation_init_[0] = viol

        elif self.penalty in ("l1", "l2", "nn"):
            penalty = self._get_penalty()

            n_pos = np.zeros(n_vectors)
            vinit = self.C / self.C_init * np.ones_like(n_pos)
            for k in range(n_vectors):
                n_pos[k] = np.sum(Y[:, k] == 1)
                vinit[k] *= self.violation_init_.get(k, 0)
            n_neg = n_samples - n_pos
            tol = self.tol * np.maximum(np.minimum(n_pos, n_neg),
                                        1) / n_samples

            jobs = (delayed(_primal_cd)(self, self.coef_, self.errors_, ds, y,
                                        Y, k, False, indices, penalty,
                                        self._get_loss(), self.selection,
                                        self.permute, self.termination, self.C,
                                        self.alpha, self.max_iter, max_steps,
                                        self.shrinking, vinit[k], rs, tol[k],
                                        self.callback, self.n_calls,
                                        self.verbose)
                    for k in range(n_vectors))
            model = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(jobs)
            viol, coefs, errors = zip(*model)
            self.coef_ = np.asarray(coefs)
            self.errors_ = np.asarray(errors)

            for k in range(n_vectors):
                if self.warm_start and k not in self.violation_init_:
                    self.violation_init_[k] = viol[k]

        if self.debiasing:
            nz = self.coef_ != 0

            if not self.warm_debiasing:
                self.coef_ = np.zeros((n_vectors, n_features),
                                      dtype=np.float64)
                self._init_errors(Y)

            indices = np.arange(n_features, dtype=np.int32)
            jobs = (delayed(_primal_cd)(self, self.coef_, self.errors_, ds, y,
                                        Y, k, False, indices[nz[k]], 2,
                                        self._get_loss(), "cyclic",
                                        self.permute, "violation_sum", self.Cd,
                                        1.0, self.max_iter, max_steps, False,
                                        0, rs, self.tol, self.callback,
                                        self.n_calls, self.verbose)
                    for k in range(n_vectors))
            model = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(jobs)
            viol, coefs, errors = zip(*model)
            self.coef_ = np.asarray(coefs)
            self.errors_ = np.asarray(errors)

        return self
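The per-class tolerance scaling in the l1/l2/nn branch above balances positive and negative counts; a minimal sketch with toy counts:

import numpy as np

n_samples = 100
n_pos = np.array([10, 60, 0])   # toy positives per class
n_neg = n_samples - n_pos
tol = 1e-3 * np.maximum(np.minimum(n_pos, n_neg), 1) / n_samples
print(tol)  # [1.e-04 4.e-04 1.e-05]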
Example #56
    def _do_role_eval(self,
                      vcocodb,
                      detections_file,
                      ovr_thresh=0.5,
                      eval_type='scenario_1'):

        output_txt = detections_file[:len(detections_file) - 4] + '.txt'

        with open(detections_file, 'rb') as f:
            dets = pickle.load(f)

        tp = [[[] for r in range(2)] for a in range(self.num_actions)]
        fp = [[[] for r in range(2)] for a in range(self.num_actions)]
        sc = [[[] for r in range(2)] for a in range(self.num_actions)]

        npos = np.zeros((self.num_actions), dtype=np.float32)

        for i in range(len(vcocodb)):
            image_id = vcocodb[i]['id']
            gt_inds = np.where(vcocodb[i]['gt_classes'] == 1)[0]
            # person boxes
            gt_boxes = vcocodb[i]['boxes'][gt_inds]
            gt_actions = vcocodb[i]['gt_actions'][gt_inds]
            # some person instances don't have annotated actions
            # we ignore those instances
            ignore = np.any(gt_actions == -1, axis=1)
            assert np.all(gt_actions[np.where(ignore == True)[0]] == -1)

            for aid in range(self.num_actions):
                npos[aid] += np.sum(gt_actions[:, aid] == 1)

            pred_agents, pred_roles = self._collect_detections_for_image(
                dets, image_id)

            for aid in range(self.num_actions):
                if len(self.roles[aid]) < 2:
                    # if action has no role, then no role AP computed
                    continue

                for rid in range(len(self.roles[aid]) - 1):

                    # keep track of detected instances for each action for each role
                    covered = np.zeros((gt_boxes.shape[0]), dtype=bool)

                    # get gt roles for action and role
                    gt_role_inds = vcocodb[i]['gt_role_id'][gt_inds, aid, rid]
                    gt_roles = -np.ones_like(gt_boxes)
                    for j in range(gt_boxes.shape[0]):
                        if gt_role_inds[j] > -1:
                            gt_roles[j] = vcocodb[i]['boxes'][gt_role_inds[j]]

                    agent_boxes = pred_agents[:, :4]
                    role_boxes = pred_roles[:, 5 * aid:5 * aid + 4, rid]
                    agent_scores = pred_roles[:, 5 * aid + 4, rid]

                    valid = np.where(np.isnan(agent_scores) == False)[0]
                    # valid = np.where(agent_scores != 0)[0]

                    agent_scores = agent_scores[valid]
                    agent_boxes = agent_boxes[valid, :]
                    role_boxes = role_boxes[valid, :]

                    idx = agent_scores.argsort()[::-1]

                    for j in idx:
                        pred_box = agent_boxes[j, :]
                        overlaps = get_overlap(gt_boxes, pred_box)

                        # matching happens based on the person
                        jmax = overlaps.argmax()
                        ovmax = overlaps.max()

                        # if matched with an instance with no annotations
                        # continue
                        if ignore[jmax]:
                            continue

                        # overlap between predicted role and gt role
                        if np.all(gt_roles[jmax, :] == -1):  # if no gt role
                            if eval_type == 'scenario_1':
                                if np.all(role_boxes[j, :] == 0.0) or np.all(
                                        np.isnan(role_boxes[j, :])):
                                    # if no role is predicted, mark it as correct role overlap
                                    ov_role = 1.0
                                else:
                                    # if a role is predicted, mark it as false
                                    ov_role = 0.0
                            elif eval_type == 'scenario_2':
                                # if no gt role, role prediction is always correct, irrespective of the actual prediction
                                ov_role = 1.0
                            else:
                                raise ValueError('Unknown eval type')
                        else:
                            ov_role = get_overlap(
                                gt_roles[jmax, :].reshape((1, 4)),
                                role_boxes[j, :])

                        is_true_action = (gt_actions[jmax, aid] == 1)
                        sc[aid][rid].append(agent_scores[j])
                        if is_true_action and (ovmax >= ovr_thresh) and (
                                ov_role >= ovr_thresh):
                            if covered[jmax]:
                                fp[aid][rid].append(1)
                                tp[aid][rid].append(0)
                            else:
                                fp[aid][rid].append(0)
                                tp[aid][rid].append(1)
                                covered[jmax] = True
                        else:
                            fp[aid][rid].append(1)
                            tp[aid][rid].append(0)

        # compute ap for each action
        role_ap = np.zeros((self.num_actions, 2), dtype=np.float32)
        role_ap[:] = np.nan
        for aid in range(self.num_actions):
            if len(self.roles[aid]) < 2:
                continue
            for rid in range(len(self.roles[aid]) - 1):
                a_fp = np.array(fp[aid][rid], dtype=np.float32)
                a_tp = np.array(tp[aid][rid], dtype=np.float32)
                a_sc = np.array(sc[aid][rid], dtype=np.float32)
                # sort in descending score order
                idx = a_sc.argsort()[::-1]
                a_fp = a_fp[idx]
                a_tp = a_tp[idx]
                a_sc = a_sc[idx]

                a_fp = np.cumsum(a_fp)
                a_tp = np.cumsum(a_tp)
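                # cumulative TP/FP counts give recall and precision at every
                # score threshold; voc_ap then integrates the resulting PR curve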
                rec = a_tp / float(npos[aid])
                # check
                assert (np.amax(rec) <= 1)
                prec = a_tp / np.maximum(a_tp + a_fp, np.finfo(np.float64).eps)
                role_ap[aid, rid] = voc_ap(rec, prec)

        f = open(output_txt, 'a')
        print('---------Reporting Role AP (%)------------------')
        f.write('---------Reporting Role AP (%)------------------\n')
        for aid in range(self.num_actions):
            if len(self.roles[aid]) < 2: continue
            for rid in range(len(self.roles[aid]) - 1):
                info = '{: >23}: AP = {:0.2f} (#pos = {:d})'.format(
                    self.actions[aid] + '-' + self.roles[aid][rid + 1],
                    role_ap[aid, rid] * 100.0, int(npos[aid]))
                print(info)
                f.write(info)
                f.write('\n')

        info = 'Average Role [%s] AP = %.2f' % (eval_type,
                                                np.nanmean(role_ap) * 100.00)
        print(info)
        f.write(info)
        f.write('\n')
        print('---------------------------------------------')
        f.write('---------------------------------------------\n')
        # NOTE: the arithmetic below assumes 25 valid (action, role) AP entries,
        # with the "point" action's AP stored at role_ap[-3][0]
        info = 'Average Role [%s] AP = %.2f, omitting the action "point"' % (
            eval_type,
            (np.nanmean(role_ap) * 25 - role_ap[-3][0]) / 24 * 100.00)
        print(info)
        f.write(info)
        f.write('\n')
        print('---------------------------------------------')
        f.write('---------------------------------------------\n')
        f.close()
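
# --- Supplementary sketch (not part of the original example) ---
# The evaluation above relies on two helpers that are not shown here:
# get_overlap (IoU of one box against a set of boxes) and voc_ap (VOC-style
# average precision from a precision/recall curve). Minimal sketches follow,
# assuming boxes are [x1, y1, x2, y2] arrays; the project's real
# implementations may differ in detail.
import numpy as np

def get_overlap(boxes, box):
    # intersection between each row of `boxes` (N, 4) and a single `box` (4,)
    ix1 = np.maximum(boxes[:, 0], box[0])
    iy1 = np.maximum(boxes[:, 1], box[1])
    ix2 = np.minimum(boxes[:, 2], box[2])
    iy2 = np.minimum(boxes[:, 3], box[3])
    iw = np.maximum(ix2 - ix1 + 1.0, 0.0)
    ih = np.maximum(iy2 - iy1 + 1.0, 0.0)
    inter = iw * ih
    union = ((boxes[:, 2] - boxes[:, 0] + 1.0) * (boxes[:, 3] - boxes[:, 1] + 1.0)
             + (box[2] - box[0] + 1.0) * (box[3] - box[1] + 1.0) - inter)
    return inter / union

def voc_ap(rec, prec):
    # area under the precision envelope (the "all points" VOC rule)
    mrec = np.concatenate(([0.0], rec, [1.0]))
    mpre = np.concatenate(([0.0], prec, [0.0]))
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])  # make precision non-increasing
    idx = np.where(mrec[1:] != mrec[:-1])[0]  # points where recall changes
    return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])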
Beispiel #57
0
    def process_func(idx):
        # Load original image.
        orig_idx = fields['orig_idx'][idx]
        orig_file = fields['orig_file'][idx]
        orig_path = os.path.join(celeba_dir, 'img_celeba', orig_file)
        img = PIL.Image.open(orig_path)

        # Choose oriented crop rectangle.
        lm = landmarks[orig_idx]
        eye_avg = (lm[0] + lm[1]) * 0.5 + 0.5
        mouth_avg = (lm[3] + lm[4]) * 0.5 + 0.5
        eye_to_eye = lm[1] - lm[0]
        eye_to_mouth = mouth_avg - eye_avg
        x = eye_to_eye - rot90(eye_to_mouth)
        x /= np.hypot(*x)
        x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
        y = rot90(x)
        c = eye_avg + eye_to_mouth * 0.1
        quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
        zoom = 1024 / (np.hypot(*x) * 2)

        # Shrink.
        shrink = int(np.floor(0.5 / zoom))
        if shrink > 1:
            size = (int(np.round(float(img.size[0]) / shrink)), int(np.round(float(img.size[1]) / shrink)))
            img = img.resize(size, PIL.Image.LANCZOS)  # LANCZOS == the old ANTIALIAS, removed in Pillow 10+
            quad /= shrink
            zoom *= shrink

        # Crop.
        border = max(int(np.round(1024 * 0.1 / zoom)), 3)
        crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
                int(np.ceil(max(quad[:, 1]))))
        crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
                min(crop[3] + border, img.size[1]))
        if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
            img = img.crop(crop)
            quad -= crop[0:2]

        # Simulate super-resolution.
        superres = int(np.exp2(np.ceil(np.log2(zoom))))
        if superres > 1:
            img = img.resize((img.size[0] * superres, img.size[1] * superres), PIL.Image.LANCZOS)
            quad *= superres
            zoom /= superres

        # Pad.
        pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
               int(np.ceil(max(quad[:, 1]))))
        pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
               max(pad[3] - img.size[1] + border, 0))
        if max(pad) > border - 4:
            pad = np.maximum(pad, int(np.round(1024 * 0.3 / zoom)))
            img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
            h, w, _ = img.shape
            y, x, _ = np.mgrid[:h, :w, :1]
            mask = 1.0 - np.minimum(np.minimum(np.float32(x) / pad[0], np.float32(y) / pad[1]),
                                    np.minimum(np.float32(w - 1 - x) / pad[2], np.float32(h - 1 - y) / pad[3]))
            blur = 1024 * 0.02 / zoom
            img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
            img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
            img = PIL.Image.fromarray(np.uint8(np.clip(np.round(img), 0, 255)), 'RGB')
            quad += pad[0:2]

        # Transform.
        img = img.transform((4096, 4096), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
        img = img.resize((1024, 1024), PIL.Image.LANCZOS)
        img = np.asarray(img).transpose(2, 0, 1)

        # Verify MD5.
        md5 = hashlib.md5()
        md5.update(img.tobytes())
        assert md5.hexdigest() == fields['proc_md5'][idx]

        # Load delta image and original JPG.
        with zipfile.ZipFile(os.path.join(delta_dir, 'deltas%05d.zip' % (idx - idx % 1000)), 'r') as zf:
            delta_bytes = zf.read('delta%05d.dat' % idx)
        with open(orig_path, 'rb') as file:
            orig_bytes = file.read()

        # Decrypt delta image, using original JPG data as decryption key.
        algorithm = cryptography.hazmat.primitives.hashes.SHA256()
        backend = cryptography.hazmat.backends.default_backend()
        salt = bytes(orig_file, 'ascii')
        kdf = cryptography.hazmat.primitives.kdf.pbkdf2.PBKDF2HMAC(algorithm=algorithm, length=32, salt=salt,
                                                                   iterations=100000, backend=backend)
        key = base64.urlsafe_b64encode(kdf.derive(orig_bytes))
        delta = np.frombuffer(bz2.decompress(cryptography.fernet.Fernet(key).decrypt(delta_bytes)),
                              dtype=np.uint8).reshape(3, 1024, 1024)

        # Apply delta image.
        img = img + delta

        # Verify MD5.
        md5 = hashlib.md5()
        md5.update(img.tobytes())
        assert md5.hexdigest() == fields['final_md5'][idx]
        return img
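
# --- Supplementary sketch (not part of the original example) ---
# process_func above calls a rot90 helper that is not shown. Judging from how
# it constructs the oriented crop rectangle from the eye and mouth axes, a
# minimal definition is a 90-degree rotation of a 2-vector (this is an
# assumption, not the verified original helper):
def rot90(v):
    return np.array([-v[1], v[0]])  # rotate (x, y) -> (-y, x)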
Beispiel #58
0
def main(meshfile,file,iexpt=10,iversn=22,yrflag=3,bio_path=None) :
    
    #
    # Trim the input netcdf file name so it is appropriate for reading
    #
    meshfile=str(meshfile)[2:-2]
    logger.info("Reading mesh information from %s."%(meshfile))
    #
    # Read mesh file containing grid and coordinate information.
    # Note that for now we are using the T-grid in the vertical, which may
    # need to be improved by utilizing W-points along the vertical axis.
    #
    hdept,gdept,mbathy,mbathy_u,mbathy_v,mask,e3t,plon,plat=read_grid(meshfile)
    logger.warning("Reading grid information from regional.grid.[ab] (not completed)")
    #
    # Convert from P-point (i.e. NEMO grid) to U and V HYCOM grids
    #
    mask_u=p2u_2d(mask)
    mask_v=p2v_2d(mask)
    #
    # Read regional.grid.[ab]
    # Grid angle is not used for this product because all quantities are
    # on regular rectangular grid points.
    #
    angle=numpy.zeros(plon.shape)
    #
    # Number of vertical layers at T-points.
    #
    nlev=gdept.size
    #
    # layer thickness in the absence of layer partial steps.
    #
    dt = gdept[1:] - gdept[:-1]
    #
    # Prepare/read input data file (in netcdf format). Reference time is 1950-01-01
    #
    logger.info("Reading data files.")
    file=str(file).strip()[2:-2]
    dirname=os.path.dirname(file)
    logger.debug("file name is {}".format(file))
    logger.debug("dirname is {}".format(dirname))
    logger.debug("basename is {}".format(os.path.basename(file)))
    m=re.match(r"(MERCATOR-PHY-24-)(.*\.nc)",os.path.basename(file))
    file_pre="MERCATOR-PHY-24-"  # NOTE: file_pre was used but never defined in this excerpt; defined here from the regex prefix
    logger.debug("file prefix is {}".format(file_pre))
###    m=re.match(file_pre,os.path.basename(file))
    if not m:
        msg="File %s is not a grid2D file, aborting"%file
        logger.error(msg)
        raise ValueError(msg)
    
    #fileinput0=os.path.join(dirname+"/"+"MERCATOR-PHY-24-"+m.group(2))
    file_date=file[-16:-6]
    fileinput0=file
    print((file_date,file))
    next_day=datetime.datetime.strptime(file_date, '%Y-%m-%d')+datetime.timedelta(days=1)
    fileinput1=datetime.datetime.strftime(next_day,'%Y%m%d')
    fileinput1=os.path.join(dirname+"/"+file_pre+fileinput1+'.nc')
    
    logger.info("Reading from %s"%(fileinput0))
    ncid0=netCDF4.Dataset(fileinput0,"r")
    if timeavg_method==1 and os.path.isfile(fileinput1) :
        
        logger.info("timeavg_method=1, Reading from %s"%(fileinput1))
        ncid1=netCDF4.Dataset(fileinput1,"r")
        #
        # Calculate temporal averaged temperature, salinity, and velocity
        #
        uo =   0.5*(ncid0.variables["uo"][0,:,:,:]+    ncid1.variables["uo"][0,:,:,:])
        vo =   0.5*(ncid0.variables["vo"][0,:,:,:]+    ncid1.variables["vo"][0,:,:,:])
        salt = 0.5*(ncid0.variables["so"][0,:,:,:]+    ncid1.variables["so"][0,:,:,:])
        temp = 0.5*(ncid0.variables["thetao"][0,:,:,:]+ncid1.variables["thetao"][0,:,:,:])
        ssh = numpy.squeeze(0.5*(ncid0.variables["zos"][0,:,:]+ncid1.variables["zos"][0,:,:]))
    
    else:
        #
        # Set variables based on the current file when timeavg_method != 1 or the next netcdf file is not available
        #
        logger.debug("time average method set to {}".format(timeavg_method))
        uo =   ncid0.variables["uo"][0,:,:,:]
        vo =   ncid0.variables["vo"][0,:,:,:]
        salt = ncid0.variables["so"][0,:,:,:]
        temp = ncid0.variables["thetao"][0,:,:,:]
        ssh = numpy.squeeze(ncid0.variables["zos"][0,:,:])
    #
    # These fill values are handled later; the current version accounts for
    # missing values using a gap-filling methodology.
    #
    logger.debug("getting _FillValue")
    uofill=ncid0.variables["uo"]._FillValue
    vofill=ncid0.variables["vo"]._FillValue
    slfill=ncid0.variables["so"]._FillValue
    tlfill=ncid0.variables["thetao"]._FillValue
    shfill=ncid0.variables["zos"]._FillValue

    # Set time
    logger.info("Set time.")
    time=ncid0.variables["time"][0]
    unit=ncid0.variables["time"].units
    tmp=cfunits.Units(unit)
    refy,refm,refd=(1950,1,1)
    tmp2=cfunits.Units("hours since %d-%d-%d 00:00:00"%(refy,refm,refd))
    tmp3=int(numpy.round(cfunits.Units.conform(time,tmp,tmp2)))
    mydt = datetime.datetime(refy,refm,refd,0,0,0) + datetime.timedelta(hours=tmp3) # Then calculate dt. Phew!

    if timeavg_method==1 and os.path.isfile(fileinput1)  :
        fnametemplate="archv.%Y_%j_%H"
        deltat=datetime.datetime(refy,refm,refd,0,0,0) + \
              datetime.timedelta(hours=tmp3) + \
              datetime.timedelta(hours=12)
        oname=deltat.strftime(fnametemplate)
    else:
        #
        # I am assuming that daily mean can be set at 00 instead of 12
        # for cases that there is no information of next day.
        #
        fnametemplate="archv.%Y_%j"
        deltat=datetime.datetime(refy,refm,refd,0,0,0) + \
              datetime.timedelta(hours=tmp3)
        oname=deltat.strftime(fnametemplate) + '_00'

    # model day
    refy, refm, refd=(1900,12,31)
    model_day= deltat-datetime.datetime(refy,refm,refd,0,0,0)
    model_day=model_day.days
    logger.info("Model day in HYCOM is %s"%str(model_day))
    if bio_path:
       jdm,idm=numpy.shape(plon)
       points = numpy.transpose(((plat.flatten(),plon.flatten())))
       delta = mydt.strftime( '%Y-%m-%d')
       # filename format MERCATOR-BIO-14-2013-01-05-00
       print((bio_path,delta))
       idx,biofname=search_biofile(bio_path,delta)
       if idx >7: 
          msg="No available BIO file within a week difference with PHY"
          logger.error(msg)
          raise ValueError(msg)
       logger.info("BIO file %s reading & interpolating to 1/12 deg grid cells ..."%biofname)
       ncidb=netCDF4.Dataset(biofname,"r")
       blon=ncidb.variables["longitude"][:]
       blat=ncidb.variables["latitude"][:]
       minblat=blat.min()
       no3=ncidb.variables["NO3"][0,:,:,:]
       no3[numpy.abs(no3)>1e+10]=numpy.nan
       po4=ncidb.variables["PO4"][0,:,:,:]
       po4[numpy.abs(po4)>1e+10]=numpy.nan
       si=ncidb.variables["Si"][0,:,:,:]
       si[numpy.abs(si)>1e+10]=numpy.nan
       # TODO: I need to improve this part
       nz,ny,nx=no3.shape
       dummy=numpy.zeros((nz,ny,nx+1))
       dummy[:,:,:nx]=no3;dummy[:,:,-1]=no3[:,:,-1]
       no3=dummy
       dummy=numpy.zeros((nz,ny,nx+1))
       dummy[:,:,:nx]=po4;dummy[:,:,-1]=po4[:,:,-1]
       po4=dummy
       dummy=numpy.zeros((nz,ny,nx+1))
       dummy[:,:,:nx]=si;dummy[:,:,-1]=si[:,:,-1]
       si=dummy
       dummy=numpy.zeros((nx+1))
       dummy[:nx]=blon
       blon=dummy
       blon[-1]=-blon[0]
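       # the appended column repeats the last data column with a wrapped
       # longitude value so interpolation can treat the grid as periodic in x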
# TODO:  Note that the coordinate files are for the global configuration, while
#        the data file is saved only for latitudes larger than 30. If you change
#        your data file's coordinate configuration, you need to modify the following lines.
       bio_coordfile=bio_path[:-4]+"/GLOBAL_ANALYSIS_FORECAST_BIO_001_014_COORD/GLO-MFC_001_014_mask.nc"
       biocrd=netCDF4.Dataset(bio_coordfile,"r")
       blat2 = biocrd.variables['latitude'][:]
       index=numpy.where(blat2>=minblat)[0]
       depth_lev = biocrd.variables['deptho_lev'][index[0]:,:]
#
       dummy=numpy.zeros((ny,nx+1))
       dummy[:,:nx]=depth_lev;dummy[:,-1]=depth_lev[:,-1]
       depth_lev=dummy
       depth_lev[depth_lev>50]=0
       depth_lev=depth_lev.astype('i')
       dummy_no3=numpy.copy(no3)  # copy to avoid aliasing the source arrays read in the loop below
       dummy_po4=numpy.copy(po4)
       dummy_si=numpy.copy(si)
       # extend the deepest valid nutrient value down through the water column
       for j in range(ny):
          for i in range(nx):
             dummy_no3[depth_lev[j,i]:nz-2,j,i]=no3[depth_lev[j,i]-1,j,i]
             dummy_po4[depth_lev[j,i]:nz-2,j,i]=po4[depth_lev[j,i]-1,j,i]
             dummy_si[depth_lev[j,i]:nz-2,j,i]=si[depth_lev[j,i]-1,j,i]
       no3=dummy_no3
       po4=dummy_po4
       si=dummy_si

#
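       # convert nutrient concentrations to carbon-equivalent units using
       # Redfield ratios (C:P = 106, C:N = 6.625) times the molar mass of
       # carbon, 12.01 (this reading of the constants is an assumption)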
       po4 = po4 * 106.0 * 12.01
       si = si   * 6.625 * 12.01
       no3 = no3 * 6.625 * 12.01


    logger.info("Read, trim, rotate NEMO velocities.")
    u=numpy.zeros((nlev,mbathy.shape[0],mbathy.shape[1]))
    v=numpy.zeros((nlev,mbathy.shape[0],mbathy.shape[1]))
    utmp=numpy.zeros((mbathy.shape))
    vtmp=numpy.zeros((mbathy.shape))
    #
    # Matrices to carefully detect the bottom at p-, u-, and v-grid points. While 3D mask
    # data have been used, the following methods are good enough for now.
    #
    if mbathy_method  ==  1 :
        ip = mbathy   == -1
        iu = mbathy_u == -1
        iv = mbathy_v == -1
    else:
        ip = mask   == 0
        iu = mask_u == 0
        iv = mask_v == 0
    #
    # Read 3D velocity field to calculate barotropic velocity
    #
    # Estimate barotropic velocities using partial steps along the vertical axis. Note that an early
    # version of this code used dt = gdept[1:] - gdept[:-1] on the NEMO t-grid; this part may be
    # re-calculated on vertical grid cells in the future.
    #
    logger.info("Calculate barotropic velocities.")
    ubaro,vbaro=calc_uvbaro(uo,vo,e3t,iu,iv)
    #
    # Save 2D fields (here only ubaro & vbaro)
    #
    zeros=numpy.zeros(mbathy.shape)
    #flnm = open(oname+'.txt', 'w')
    #flnm.write(oname)
    #flnm.close()
    ssh = numpy.where(numpy.abs(ssh)>1000,0.,ssh*9.81) # NB: HYCOM srfhgt is in geopotential ...
    #
    outfile = abf.ABFileArchv("./data/"+oname,"w",iexpt=iexpt,iversn=iversn,yrflag=yrflag,)
    outfile.write_field(zeros,                   ip,"montg1"  ,0,model_day,1,0)
    outfile.write_field(ssh,                     ip,"srfhgt"  ,0,model_day,0,0)
    outfile.write_field(zeros,                   ip,"surflx"  ,0,model_day,0,0) # Not used
    outfile.write_field(zeros,                   ip,"salflx"  ,0,model_day,0,0) # Not used
    outfile.write_field(zeros,                   ip,"bl_dpth" ,0,model_day,0,0) # Not used
    outfile.write_field(zeros,                   ip,"mix_dpth",0,model_day,0,0) # Not used
    outfile.write_field(ubaro,                   iu,"u_btrop" ,0,model_day,0,0)
    outfile.write_field(vbaro,                   iv,"v_btrop" ,0,model_day,0,0)
    #
    if bio_path:
       logger.info("Calculate baroclinic velocities, temperature, and salinity data as well as BIO field.")
    else:
       logger.info("Calculate baroclinic velocities, temperature, and salinity data.")
    for k in range(u.shape[0]):
        if bio_path:
           no3k=interpolate2d(blat, blon, no3[k,:,:], points).reshape((jdm,idm))
           no3k = maplev(no3k)
           po4k=interpolate2d(blat, blon, po4[k,:,:], points).reshape((jdm,idm))
           po4k = maplev(po4k)
           si_k=interpolate2d(blat, blon, si[k,:,:], points).reshape((jdm,idm))
           si_k = maplev(si_k)
           if k%10==0 : logger.info("Writing 3D variables including BIO, level %d of %d"%(k+1,u.shape[0]))
        else:
           if k%10==0 : logger.info("Writing 3D variables, level %d of %d"%(k+1,u.shape[0]))
        #

        #
        uo[k,:,:]=numpy.where(numpy.abs(uo[k,:,:])<10,uo[k,:,:],0)
        vo[k,:,:]=numpy.where(numpy.abs(vo[k,:,:])<10,vo[k,:,:],0)

        # Baroclinic velocity (in HYCOM U- and V-grid)
        ul = p2u_2d(numpy.squeeze(uo[k,:,:])) - ubaro
        vl = p2v_2d(numpy.squeeze(vo[k,:,:])) - vbaro
        ul[iu]=spval
        vl[iv]=spval
        
        # Layer thickness
        
        dtl=numpy.zeros(mbathy.shape)
        # Use dt for the water column except the nearest cell to bottom 
        if thickness_method==1:
            if k < u.shape[0]-1 :
                J,I = numpy.where(mbathy>k)
                e3=(e3t[k,:,:])
                dtl[J,I]=dt[k]
                J,I = numpy.where(mbathy==k)
                dtl[J,I]=e3[J,I]
            else:
                e3=(e3t[k,:,:])
                J,I = numpy.where(mbathy==k)
                dtl[J,I]=e3[J,I]
        # Use partial cells for the whole water column.
        else :
            J,I = numpy.where(mbathy>=k)
            dtl[J,I]=e3t[k,J,I]

        # Salinity
        sl = salt[k,:,:]

        # Temperature
        tl = temp[k,:,:]
        # This needs careful treatment to minimize artifacts in the resulting [ab] files.
        if fillgap_method==1:
            J,I= numpy.where(mbathy<k)
            sl = maplev(numpy.where(numpy.abs(sl)<1e2,sl,numpy.nan))
            sl[J,I]=spval
            J,I= numpy.where(mbathy<k)
            tl = maplev(numpy.where(numpy.abs(tl)<1e2,tl,numpy.nan))
            tl[J,I]=spval
        else:
            sl = numpy.where(numpy.abs(sl)<1e2,sl,numpy.nan)
            sl = numpy.minimum(numpy.maximum(maplev(sl),25),80.)
            tl = numpy.where(numpy.abs(tl)<=5e2,tl,numpy.nan)
            tl = numpy.minimum(numpy.maximum(maplev(tl),-5.),50.)

        # Thickness
        dtl = maplev(dtl)
        if k > 0 :
            with numpy.errstate(invalid='ignore'):
                K= numpy.where(dtl < 1e-4)
            sl[K] = sl_above[K]
            tl[K] = tl_above[K]
        #
        sl[ip]=spval
        tl[ip]=spval

        # Save 3D fields
        outfile.write_field(ul      ,iu,"u-vel.",0,model_day,k+1,0)
        outfile.write_field(vl      ,iv,"v-vel.",0,model_day,k+1,0)
        outfile.write_field(dtl*onem,ip,"thknss",0,model_day,k+1,0)
        outfile.write_field(tl      ,ip,"temp" , 0,model_day,k+1,0)
        outfile.write_field(sl      ,ip,"salin" ,0,model_day,k+1,0)
        if bio_path :
           outfile.write_field(no3k      ,ip,"ECO_no3" ,0,model_day,k+1,0)
           outfile.write_field(po4k      ,ip,"ECO_pho" ,0,model_day,k+1,0)
           outfile.write_field(si_k      ,ip,"ECO_sil" ,0,model_day,k+1,0)
                
        tl_above=numpy.copy(tl)
        sl_above=numpy.copy(sl)
    
    outfile.close()
    ncid0.close()
    if timeavg_method==1 and os.path.isfile(fileinput1)  :
        ncid1.close()
    if bio_path :
       ncidb.close()
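
# --- Supplementary sketch (not part of the original example) ---
# main() relies on p2u_2d / p2v_2d to move fields from NEMO P-points onto
# HYCOM U- and V-points. A minimal sketch, assuming a simple C-grid style
# two-point average of neighbouring P-points; the project's real routines
# may treat boundaries and land masks differently.
import numpy

def p2u_2d(field):
    out = numpy.copy(field)
    out[:, :-1] = 0.5 * (field[:, :-1] + field[:, 1:])  # average neighbours in x
    return out

def p2v_2d(field):
    out = numpy.copy(field)
    out[:-1, :] = 0.5 * (field[:-1, :] + field[1:, :])  # average neighbours in y
    return out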
Beispiel #59
0
    def train(self, interactive_plot=False, verbose=True):
        ''' Trains the Q-learning with IR agent'''
        
        start_time = timeit.default_timer()
        
        if interactive_plot:
            #interactive plot: initialise the graph and settings
            Title =  'Q-learning with penalty' if self.with_penalty else 'Q-learning no penalty'
            self.initialize_plot(Title, 'Time steps','Action-value')
            Labels = np.array([['L['+str(self.sa1)+']', 'Q['+str(self.sa1)+']','U['+str(self.sa1)+']','Q-learning['+str(self.sa1)+']'],
                               ['L['+str(self.sa2)+']', 'Q['+str(self.sa2)+']','U['+str(self.sa2)+']','Q-learning['+str(self.sa2)+']'],
                               ['L['+str(self.sa3)+']', 'Q['+str(self.sa3)+']','U['+str(self.sa3)+']','Q-learning['+str(self.sa3)+']']])
            
            self.L_list.append(np.copy(self.L))
            self.U_list.append(np.copy(self.U))
        self.Q_list.append(np.copy(self.Q)) 
        
        self.QL_list.append(np.copy(self.Q_learning))
        # initialize state
        state = self.env.reset()
        state_idx = self.find_indices(state)
        
        for step in range(self.num_steps):

            if self.USE_SCHEDULE:
                self.U_lr= self.U_lr_schedule[step]
                self.L_lr= self.L_lr_schedule[step]

            # choose an action based on epsilon-greedy policy
            q_values =  self.Q[state_idx, :]           
            action, action_idx = self.epsilon_greedy_policy(q_values, state_idx)
            
            self.most_visited_sa.append((state_idx,action_idx))
            
            self.count[state_idx, action_idx] += 1
              
            self.lr = self.lr_func(self.count[state_idx, action_idx])  
            
            # execute action
            newState, reward, info = self.env.step(action)
            newState_idx = self.find_indices(newState)
            
            self.memory_eps.append(info['noise']['epsilons'])
            self.memory_B.append(info['noise']['B'])
            # Q-Learning update
            self.Q[state_idx, action_idx] +=  self.lr *(reward + self.gamma* np.max(self.Q[newState_idx, :]) -\
                                          self.Q[state_idx, action_idx])
            if interactive_plot:
                # Standard Q-learning following the same behavioral policy
                self.Q_learning[state_idx, action_idx] += self.lr * (reward + self.gamma * np.max(self.Q_learning[newState_idx, :]) -
                                                                     self.Q_learning[state_idx, action_idx])
            cond = (step % self.M == 0 and step >= self.burn_in) and \
                   (not np.isclose(self.L[state_idx, action_idx], self.U[state_idx, action_idx], self.relTol) or
                    not self.L[state_idx, action_idx] <= self.Q[state_idx, action_idx] <= self.U[state_idx, action_idx])
            if cond:
                
                sample_path_eps  = self.memory_eps.simulate_sample_path()
                sample_path_B  = self.memory_B.simulate_sample_path()
                assert len(sample_path_eps)==len(sample_path_B)
                if self.USE_K:
                    self.sample_eps = self.memory_eps.sample()
                    self.sample_B = self.memory_B.sample()
                else:
                    self.sample_eps = sample_path_eps
                    self.sample_B = sample_path_B
                    
                self.solve_QG_QL_DP(sample_path_eps, sample_path_B)                
                if self.with_penalty:
                    self.U += self.U_lr * (self.QG[0, :, :] - self.U)
                else:
                    self.U += self.U_lr * (self.QD[0, :, :] - self.U)
                self.L += self.L_lr * (self.QL[0,:, :] - self.L)

            # clamp the Q estimate into the current lower/upper bounds [L, U]
            self.Q[state_idx, action_idx] = np.clip(self.Q[state_idx, action_idx],
                                                    self.L[state_idx, action_idx],
                                                    self.U[state_idx, action_idx])
             
            state = newState
            state_idx = newState_idx
 
            if interactive_plot:
                # record the tracked action-values at every step (plotted every 10000 steps)
                self.Lsa_1.append(self.L[self.sa1])
                self.Qsa_1.append(self.Q[self.sa1])
                self.Usa_1.append(self.U[self.sa1]) 
                self.QLsa_1.append(self.Q_learning[self.sa1])
                self.Lsa_2.append(self.L[self.sa2])
                self.Qsa_2.append(self.Q[self.sa2])
                self.Usa_2.append(self.U[self.sa2]) 
                self.QLsa_2.append(self.Q_learning[self.sa2])
                self.Lsa_3.append(self.L[self.sa3])
                self.Qsa_3.append(self.Q[self.sa3])
                self.Usa_3.append(self.U[self.sa3]) 
                self.QLsa_3.append(self.Q_learning[self.sa3])
                
                
    #            self.L_list.append(np.copy(self.L))
    #            self.U_list.append(np.copy(self.U))
            
                if step % 10000 == 0:
                    to_plot =np.array([[self.Lsa_1, self.Qsa_1, self.Usa_1, self.QLsa_1],
                                       [self.Lsa_2, self.Qsa_2, self.Usa_2, self.QLsa_2],
                                       [self.Lsa_3, self.Qsa_3, self.Usa_3, self.QLsa_3]])
                    self.plot_on_running(to_plot, Labels, step)
            if (step % self.num_to_save_Q) == 0 and step > 0:
                self.Q_list.append(np.copy(self.Q))
                # self.QL_list.append(np.copy(self.Q_learning))

        elapsed_time = timeit.default_timer() - start_time
        print("Time="+str(elapsed_time))
        print(elapsed_time)
        return self.Q_list, self.QL_list, elapsed_time 
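
    # --- Supplementary sketch (not part of the original example) ---
    # train() calls self.epsilon_greedy_policy, which is not shown here. A
    # minimal sketch of the usual rule: explore uniformly at random with
    # probability epsilon, otherwise exploit the greedy action. The attribute
    # names self.epsilon and self.actions are assumptions, not verified.
    def epsilon_greedy_policy(self, q_values, state_idx):
        if np.random.rand() < self.epsilon:  # explore
            action_idx = np.random.randint(len(q_values))
        else:  # exploit
            action_idx = int(np.argmax(q_values))
        return self.actions[action_idx], action_idx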
    def forward(self, x):
        # ReLU forward pass: cache the input for a later backward pass and
        # zero out all negative activations
        self.x = x
        return np.maximum(0, self.x)
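
    def backward(self, dout):
        # Hedged companion sketch (not in the original snippet): the ReLU
        # gradient passes upstream gradients through only where the cached
        # input was positive, and is zero elsewhere.
        return dout * (self.x > 0)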