Example #1
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
    """
    np.where(cond, x, fillvalue) always evaluates x even where cond is False.
    This one only evaluates f(arr1[cond], arr2[cond], ...).
    For example,
    >>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
    >>> def f(a, b):
    ...     return a*b
    >>> _lazywhere(a > 2, (a, b), f, np.nan)
    array([ nan,  nan,  21.,  32.])

    Notice it assumes that all `arrays` are of the same shape, or can be
    broadcasted together.

    """
    if fillvalue is None:
        if f2 is None:
            raise ValueError("One of (fillvalue, f2) must be given.")
        else:
            fillvalue = np.nan
    else:
        if f2 is not None:
            raise ValueError("Only one of (fillvalue, f2) can be given.")

    arrays = np.broadcast_arrays(*arrays)
    temp = tuple(np.extract(cond, arr) for arr in arrays)
    tcode = np.mintypecode([a.dtype.char for a in arrays])
    out = _valarray(np.shape(arrays[0]), value=fillvalue, typecode=tcode)
    np.place(out, cond, f(*temp))
    if f2 is not None:
        temp = tuple(np.extract(~cond, arr) for arr in arrays)
        np.place(out, ~cond, f2(*temp))

    return out
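
A short usage sketch (assuming numpy is imported as np and the scipy-internal `_valarray` helper used above is available): passing `f2` instead of `fillvalue` lazily evaluates a second function on the elements where the condition is False.

import numpy as np

a = np.array([1, 2, 3, 4], dtype=float)
b = np.array([5, 6, 7, 8], dtype=float)

# Where cond holds, f is evaluated; elsewhere f2 is evaluated on the remaining elements.
out = _lazywhere(a > 2, (a, b), f=lambda x, y: x * y, f2=lambda x, y: x + y)
# out is [6., 8., 21., 32.]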
Example #2
    def update_strip_pattern(self, new_pos=False):
        with self.visuals_changed.hold_and_emit():
            if not self.block_strip:
                self.block_strip = True

                if new_pos:
                    # calculate average starting point y value
                    condition = (self.data_x >= self.strip_startx - 0.1) & (self.data_x <= self.strip_startx + 0.1)
                    section = np.extract(condition, self.data_y[:, 0])
                    self.avg_starty = np.average(section)
                    noise_starty = 2 * np.std(section) / self.avg_starty

                    # calculate average ending point y value
                    condition = (self.data_x >= self.strip_endx - 0.1) & (self.data_x <= self.strip_endx + 0.1)
                    section = np.extract(condition, self.data_y[:, 0])
                    self.avg_endy = np.average(section)
                    noise_endy = 2 * np.std(section) / self.avg_starty

                    # Calculate new slope and noise level
                    self.strip_slope = (self.avg_starty - self.avg_endy) / (self.strip_startx - self.strip_endx)
                    self.noise_level = (noise_starty + noise_endy) * 0.5

                # Get the x-values in between start and end point:
                condition = (self.data_x >= self.strip_startx) & (self.data_x <= self.strip_endx)
                section_x = np.extract(condition, self.data_x)

                # Calculate the new y-values, add noise according to noise_level
                noise = self.avg_endy * 2 * (np.random.rand(*section_x.shape) - 0.5) * self.noise_level
                section_y = (self.strip_slope * (section_x - self.strip_startx) + self.avg_starty) + noise
                self.stripped_pattern = (section_x, section_y)
                self.block_strip = False
Example #3
def plotterIterations(X,nX,nIt,y,ny) :
    # Reduce the data to individual days
    EltIteration = X[:,nIt]
    Iterations = np.unique(EltIteration)
    print(Iterations)

    # Handle multiple colors
    colors = cm.rainbow(np.linspace(0, 1, len(Iterations)))
    
    i = 0
    for k,c in zip(Iterations,colors)  :
        # Keep only this day
        condition = EltIteration == k # between 60 and 153
        xPlot = np.extract(condition, X[:,nX])
        yPlot = np.extract(condition, y[:,ny])
        plt.scatter(xPlot,yPlot, color=c,s=2)
        if i==10 :
            break
        i+=1
        #sort
        #print(np.concatenate(xPlot,yPlot))
        #z = np.sort(np.concatenate(xPlot,yPlot),0)
        #plt.plot(z[:,0], z[:,1], color=c,linewidth=2)
        #print(k)
        #print(xPlot)
        #print(yPlot)
        #print(z[:,0])
        #print(z[:,1])
        #break

    #plt.plot(X_test, regr.predict(X_test), color='blue',linewidth=3)
    plt.xticks(())
    plt.yticks(())

    plt.title('')
Example #4
File: basic.py Project: alexleach/scipy
def diric(x,n):
    """Returns the periodic sinc function also called the dirichlet function:

    diric(x) = sin(x *n / 2) / (n sin(x / 2))

    where n is a positive integer.
    """
    x,n = asarray(x), asarray(n)
    n = asarray(n + (x-x))
    x = asarray(x + (n-n))
    if issubdtype(x.dtype, inexact):
        ytype = x.dtype
    else:
        ytype = float
    y = zeros(x.shape,ytype)

    mask1 = (n <= 0) | (n != floor(n))
    place(y,mask1,nan)

    z = asarray(x / 2.0 / pi)
    mask2 = (1-mask1) & (z == floor(z))
    zsub = extract(mask2,z)
    nsub = extract(mask2,n)
    place(y,mask2,pow(-1,zsub*(nsub-1)))

    mask = (1-mask1) & (1-mask2)
    xsub = extract(mask,x)
    nsub = extract(mask,n)
    place(y,mask,sin(nsub*xsub/2.0)/(nsub*sin(xsub/2.0)))
    return y
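
The mask/extract/place pattern used above can be seen in isolation in the following sketch (plain numpy, with n fixed at 3; the kernel is evaluated only where the denominator is nonzero):

import numpy as np

x = np.linspace(-np.pi, np.pi, 9)
y = np.zeros_like(x)
mask = x != 0                          # avoid the removable singularity at x = 0
xs = np.extract(mask, x)               # pull out only the safe points
np.place(y, mask, np.sin(3 * xs / 2) / (3 * np.sin(xs / 2)))
np.place(y, ~mask, 1.0)                # limit of the kernel at x = 0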
Example #5
File: windows.py Project: 7islands/scipy
def parzen(M, sym=True):
    """Return a Parzen window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True, generates a symmetric window, for use in filter design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if the number of samples is even and sym is True).

    Examples
    --------
    Plot the window and its frequency response:

    >>> from scipy import signal
    >>> from scipy.fftpack import fft, fftshift
    >>> import matplotlib.pyplot as plt

    >>> window = signal.parzen(51)
    >>> plt.plot(window)
    >>> plt.title("Parzen window")
    >>> plt.ylabel("Amplitude")
    >>> plt.xlabel("Sample")

    >>> plt.figure()
    >>> A = fft(window, 2048) / (len(window)/2.0)
    >>> freq = np.linspace(-0.5, 0.5, len(A))
    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
    >>> plt.plot(freq, response)
    >>> plt.axis([-0.5, 0.5, -120, 0])
    >>> plt.title("Frequency response of the Parzen window")
    >>> plt.ylabel("Normalized magnitude [dB]")
    >>> plt.xlabel("Normalized frequency [cycles per sample]")

    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    odd = M % 2
    if not sym and not odd:
        M = M + 1
    n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
    na = np.extract(n < -(M - 1) / 4.0, n)
    nb = np.extract(abs(n) <= (M - 1) / 4.0, n)
    wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0
    wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 +
          6 * (np.abs(nb) / (M / 2.0)) ** 3.0)
    w = np.r_[wa, wb, wa[::-1]]
    if not sym and not odd:
        w = w[:-1]
    return w
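
A quick numeric spot-check of the two piecewise formulas, calling the function defined above (a sketch, assuming numpy is imported as np):

import numpy as np

w = parzen(5)
# hand-computed from the two branches: 2*(1 - 2/2.5)**3 = 0.016 and
# 1 - 6*(1/2.5)**2 + 6*(1/2.5)**3 = 0.424
assert np.allclose(w, [0.016, 0.424, 1.0, 0.424, 0.016])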
Example #6
def log_distances(m, sampleix=0, doplot=True, quiet=True, logv=None,
              plotstyle=None, return_ixs=False):
    """Log distances (L2-norm) of data points from a reference point (sampleix).

    For multiple calls to this function on the same data, it's faster to
    pass logv precomputed.

    return_ixs=True also returns the point indices from the sort as a
    third return value.
    """
    npts = size(m,0)
    assert sampleix >= 0
    d = zeros((npts-1,), 'd')
    if rank(m) == 3:
        for i in range(npts):
            if sampleix != i:
                try:
                    d[i] = norm(m[sampleix,:,:]-m[i,:,:])
                except IndexError:
                    # catch case when index is too large for npts-1
                    # so use the empty i=pix position (ordering doesn't matter)
                    d[sampleix] = norm(m[sampleix,:,:]-m[i,:,:])
    elif rank(m) == 2:
        for i in range(npts):
            if sampleix != i:
                try:
                    d[i] = norm(m[sampleix,:]-m[i,:])
                except IndexError:
                    # catch case when index is too large for npts-1
                    # so use the empty i=pix position (ordering doesn't matter)
                    d[sampleix] = norm(m[sampleix,:]-m[i,:])
    else:
        raise ValueError("Rank of input data must be 2 or 3")
    if return_ixs:
        # return sorted indices in that list argument
        # (assumed to be an empty list)
        ixs = array(argsort(d))
        d = d[ixs]
    else:
        # just sort
        d.sort()
    if not quiet:
        print("Chose reference point %i"%sampleix)
        print("Min distance = %f, max distance = %f"%(d[0], d[-1]))
    logd = log(d).ravel()
    if logv is None:
        logv = log(list(range(1,len(d)+1)))
    nan_ixs = isfinite(logd)  # mask, not a list of indices
    logd = extract(nan_ixs, logd)
    logv = extract(nan_ixs, logv)
    if doplot:
        if plotstyle is None:
            plot(logd,logv)
        else:
            plot(logd,logv,plotstyle)
    if return_ixs:
        ixs = extract(nan_ixs, ixs)
        return (logv, logd, ixs)
    else:
        return (logv, logd)
Example #7
def reduce_grid_points(mesh_divisors,
                       grid_address,
                       dense_grid_points,
                       dense_grid_weights=None,
                       coarse_mesh_shifts=None):
    divisors = np.array(mesh_divisors, dtype='intc')
    if (divisors == 1).all():
        coarse_grid_points = np.array(dense_grid_points, dtype='intc')
        if dense_grid_weights is not None:
            coarse_grid_weights = np.array(dense_grid_weights, dtype='intc')
    else:
        grid_weights = []
        if coarse_mesh_shifts is None:
            shift = [0, 0, 0]
        else:
            shift = np.where(coarse_mesh_shifts, divisors // 2, [0, 0, 0])
        modulo = grid_address[dense_grid_points] % divisors
        condition = (modulo == shift).all(axis=1)
        coarse_grid_points = np.extract(condition, dense_grid_points)
        if dense_grid_weights is not None:
            coarse_grid_weights = np.extract(condition, dense_grid_weights)

    if dense_grid_weights is None:
        return coarse_grid_points
    else:
        return coarse_grid_points, coarse_grid_weights
Example #8
File: lsofcsr.py Project: chrinide/pyscf
def lsofcsr(coo3, dtype=float, shape=None, axis=0):
  """
    Generate a list of csr matrices out of a 3-dimensional coo format 
    Args:
      coo3  : must be a tuple (data, (i1,i2,i3)) in analogy to the tuple (data, (rows,cols)) for a common coo format
      shape : a tuple of dimensions if they are known or cannot be guessed correctly from the data
      axis  : index (0,1 or 2) along which to construct the list of sparse arrays
    Returns:
      list of csr matrices
  """
  (d, it) = coo3
  assert len(it)==3
  for ia in it: assert len(d)==len(ia)
  shape = [max(ia)+1 for ia in it] if shape is None else shape
  #print( len(d) )
  #print( shape )
  
  iir = [i for i in range(len(shape)) if i!=axis]
  #print(iir)
  
  lsofcsr = [0] * shape[axis]
  sh = [shape[i] for i in iir]
  #print(sh, shape)
  for i in range(shape[axis]):
    mask = it[axis]==i
    csrm = csr_matrix( (extract(mask,d), (extract(mask,it[iir[0]]),extract(mask,it[iir[1]]) )), shape=sh, dtype=dtype)
    csrm.eliminate_zeros()
    lsofcsr[i] = csrm
  return lsofcsr
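
A minimal usage sketch with made-up indices (assuming `numpy.extract` and `scipy.sparse.csr_matrix` are imported in the module, as the body above requires):

import numpy as np

# entries d[k] sit at positions (i1[k], i2[k], i3[k]) of a 2 x 3 x 3 tensor
d  = np.array([1.0, 2.0, 3.0, 4.0])
i1 = np.array([0, 0, 1, 1])
i2 = np.array([0, 1, 0, 2])
i3 = np.array([1, 0, 2, 2])

mats = lsofcsr((d, (i1, i2, i3)), shape=(2, 3, 3), axis=0)
print(mats[0].toarray())   # dense view of the slice i1 == 0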
Example #9
def get_linear_idxs_of_diff_bits(SortedDeltaIdxs, dists_vec, cfg):

    # Identifies spots in matrix where the indices of diff bits will be.
    IsPossDistLteActual = calc_IsPossDistLteActual(dists_vec, cfg)

    # Columnar linear indices for the whole mtx.
    UniqSortedDeltaIdxs = uniq_SortedDeltaIdxs(SortedDeltaIdxs, cfg)
    
    # >> Which bits were actually different!  (Pts were on opposite sides of HP.) <<
    #
    # Hack:
    # We +1 to the idxs, so that none has the value 0.
    # Then we replace all the idxs we don't want with 0.
    # For those left, we shift back down one.
    LinearIdxsOfDiffBits = (UniqSortedDeltaIdxs + 1) * IsPossDistLteActual
    my_assert (lambda : LinearIdxsOfDiffBits.shape == cfg.shape)
    # print 'LinearIdxsOfDiffBits (+1)';  print LinearIdxsOfDiffBits

    diff_bits = np.extract(LinearIdxsOfDiffBits > 0, LinearIdxsOfDiffBits)
    diff_bits -= 1
    # --------------------------------
    # extract(): returns non-zero vals by going along ROWs,
    # not down COLs (as in Matlab).  So the ORDER of these may be wacky.
    # Sorting gives us the same order that Matlab would.
    # --------------------------------
    diff_bits.sort()

    my_assert (lambda :
               len(diff_bits) ==
               len(np.extract(IsPossDistLteActual == True, IsPossDistLteActual)))
    
    print 'diff_bits:', diff_bits
    return diff_bits
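
The "+1" trick described in the comments can be illustrated on a tiny made-up matrix (a sketch, independent of the project-specific helpers above):

import numpy as np

idxs = np.array([[0, 1], [2, 3]])            # linear indices; 0 is a legal index
keep = np.array([[1, 0], [0, 1]])            # 1 where the index should survive
shifted = (idxs + 1) * keep                  # unwanted slots become exactly 0
kept = np.extract(shifted > 0, shifted) - 1  # shift back down
kept.sort()                                  # row-major extraction order -> sort, as in Matlab
print(kept)                                  # [0 3]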
Example #10
def asymptotic_pdf(t, tres, tau, area):
    """
    Calculate asymptotic probability density function.

    Parameters
    ----------
    t : ndarray.
        Time.
    tres : float
        Time resolution.
    tau : ndarray, shape(k, 1)
        Time constants.
    area : ndarray, shape(k, 1)
        Component relative area.

    Returns
    -------
    apdf : ndarray.
    """
    t1 = np.extract(t[:] < tres, t)
    t2 = np.extract(t[:] >= tres, t)
    apdf2 = t2 * pdfs.expPDF(t2 - tres, tau, area)
    apdf = np.append(t1 * 0.0, apdf2)

    return apdf
Example #11
def factorial2(n,exact=0):
    """n!! = special.gamma(n/2+1)*2**((m+1)/2)/sqrt(pi)  n odd
           = 2**(n) * n!                                 n even

    If exact==0, then floating point precision is used, otherwise
    exact long integer is computed.

    Notes:
      - Array argument accepted only for exact=0 case.
      - If n<0, the return value is 0.
    """
    if exact:
        if n < -1:
            return 0L
        if n <= 0:
            return 1L
        val = 1L
        for k in xrange(n,0,-2):
            val *= k
        return val
    else:
        from scipy import special
        n = asarray(n)
        vals = zeros(n.shape,'d')
        cond1 = (n % 2) & (n >= -1)
        cond2 = (1-(n % 2)) & (n >= -1)
        oddn = extract(cond1,n)
        evenn = extract(cond2,n)
        nd2o = oddn / 2.0
        nd2e = evenn / 2.0
        place(vals,cond1,special.gamma(nd2o+1)/sqrt(pi)*pow(2.0,nd2o+0.5))
        place(vals,cond2,special.gamma(nd2e+1) * pow(2.0,nd2e))
        return vals
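
A quick numeric check of the two branches (a sketch that assumes the module-level `from numpy import *` style imports of the original file and, for the exact branch, a Python 2 interpreter because of the long literals above):

import numpy as np

# 7!! = 7*5*3*1 = 105 and 8!! = 8*6*4*2 = 384
assert factorial2(7, exact=1) == 105
assert factorial2(8, exact=1) == 384
assert np.allclose(factorial2(np.array([7, 8])), [105.0, 384.0])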
Example #12
def count_Ave_CorrCoef(NeuCountArray):
    """
    FUNCTION DESCRIPTION
        This function takes as input an N-D numpy.array of spike counts (one row per neuron) and
        returns the average absolute correlation coefficient between distinct neurons.

    :param NeuCountArray: N-D numpy.array, spike counts for each neuron stored in a numpy.array
    """


    #Spike times array turned into a numpy array
    NeuCountArray = np.array(NeuCountArray)
    print(NeuCountArray)

    #Generate array of normalized correlation coefficients (makes a symmetrical matrix)
    CountCorrCoef = np.corrcoef(NeuCountArray, rowvar = True)
    print(CountCorrCoef)

    #Keep the upper triangle of the symmetrical corr. coef. matrix and the diagonal of ones (the redundant information is converted to zeros)
    UpTriCorrCoef = np.triu(CountCorrCoef)
    print (UpTriCorrCoef)

    #Eliminate the zeros (redundant info from symmetrical matrix) from the symmetrical corr. coef matrix
    NoZero = np.extract(abs(UpTriCorrCoef) > 0, UpTriCorrCoef)
    print(NoZero)

    #Eliminate the ones from the symmetrical corr. coef matrix (eliminate the variances and keep covariances)
    CovariancesOnly = np.extract(abs(NoZero) < 1, NoZero)
    print(CovariancesOnly)

    #Average the covariances
    AveCov = np.mean(abs(CovariancesOnly))

    return (AveCov)
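
The triu/extract filtering chain can be checked on a tiny made-up example (a sketch, assuming numpy is imported as np):

import numpy as np

counts = np.array([[1, 2, 3, 4, 5],
                   [2, 1, 4, 3, 6],
                   [5, 3, 1, 0, 2]])
cc = np.corrcoef(counts)                            # 3x3 symmetric, ones on the diagonal
upper = np.triu(cc)                                 # lower triangle zeroed out
offdiag = np.extract(np.abs(upper) > 0, upper)      # drop the zeros from the lower triangle
offdiag = np.extract(np.abs(offdiag) < 1, offdiag)  # drop the diagonal ones
print(np.mean(np.abs(offdiag)))                     # average absolute pairwise correlation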
Example #13
def find_next_set_images(location_x,location_y,heading,file_database_sorted,picture_name_list):
    
    image_found=0
    
    heading,direction_vector=phase_wrap_heading(heading)
    # Convert heading
    phase_wrap=np.array([3, 0, 1, 2, 3, 0],dtype='u1')
    heading_array=np.array([phase_wrap[heading], phase_wrap[heading+1],phase_wrap[heading+2]])
    # find x values
    matched_x_loc=np.extract(file_database_sorted['x_loc']==location_x,file_database_sorted)
    # Check values found!!!!!
    if matched_x_loc.size<4:
        print "Not enough images at this x location!!, number img=\t", matched_x_loc.size 
        return (0,0,heading,direction_vector,0)

    # find y values
    matched_y_loc=np.extract(matched_x_loc['y_loc']==location_y,matched_x_loc)
    # Check values found!!!!!
    if matched_y_loc.size<4:
        print "Not enough images at this y location!!, number img=\t", matched_y_loc.size 
        return (0,0,heading,direction_vector,0)
 
    images_2_display=matched_y_loc['file_id'][heading_array]
    combined_img = np.concatenate(img_file[images_2_display] , axis=1) #file_database_north_sortx[0,:]
    resized_img = cv2.resize(combined_img, (image_display_width, image_display_height)) 
    image_found=1
    picture_name=picture_name_list[images_2_display[1]]
    return (resized_img,image_found,heading,direction_vector,picture_name)
Example #14
def get_Ic(qNow, sFinal, qMax, x, interp, extrap, weighted_transition=True):
    '''return the corrected intensity based on circular symmetry'''
    u = numpy.sqrt(qNow*qNow + x*x) # circular-symmetric

    # divide integrand into different regions
    # interpolate from existing data
    u_in = numpy.extract(u <= sFinal, u)
    Ic_in = numpy.exp(interp(u_in))
    
    condition = numpy.multiply(sFinal < u, u <= qMax)
    u_mid = numpy.extract(condition, u)
    if u_mid.size < 2 or not weighted_transition:
        Ic_mid = numpy.exp(interp(u_mid))
    else:
        # make smooth transition between sFinal < q < qMax
        #weight = (u_mid - u_mid.min()) / (u_mid.max() - u_mid.min())
        weight = numpy.linspace(0, 1.0, u_mid.size)
        Ic_mid_in = numpy.exp(interp(u_mid))
        Ic_mid_ex = extrap.calc(u_mid)
        Ic_mid = (1-weight) * Ic_mid_in + weight * Ic_mid_ex

    # extrapolate from model beyond range of available data
    u_ex = numpy.extract(qMax < u, u)
    Ic_ex = extrap.calc(u_ex)
    
    # join the parts of the integrand
    return numpy.concatenate((Ic_in, Ic_mid, Ic_ex))
Example #15
def getClusterXYZ(RecHits, clusterID):
    '''
    Computes the log-energy-weighted xyzt coordinates of a given cluster. The energy is initially
    reduced by a threshold amount to discard noisy hits, and then log'd to account for energy
    collection fluctuations in the material.
    '''
    extractMask = np.logical_and.reduce((RecHits['t'] > 0, 
                                         RecHits['clusterID'] == clusterID,
                                         RecHits['isIn3x3']))                                       #|Extraction mask for cluster t and xyz processing; decides initially which hits to be counted
    #minLayer = np.min(newRecHits['layerID'])
    #newRecHits = np.compress(newRecHits['layerID'] == minLayer, newRecHits)
    x = np.extract(extractMask, RecHits['x'])
    y = np.extract(extractMask, RecHits['y'])
    z = np.extract(extractMask, RecHits['z'])
    E = np.extract(extractMask, RecHits['en'])

    return (np.mean(x), np.mean(y), np.mean(z))                                                     #|Flat average

    # Energy weight
    w0 = 7                                                                                          #|Arbitrary weighting term that works well, as found by Geoffrey Monet
    Eweight = np.maximum(np.log(E/np.sum(E)) + w0, 0)


    xw, yw, zw = (np.dot(x,Eweight)/np.sum(Eweight), 
                  np.dot(y,Eweight)/np.sum(Eweight), 
                  np.dot(z,Eweight)/np.sum(Eweight))                                                #|Weighted average

    return xw, yw, zw
Example #16
def makeHistogram(coadd, numBins, numImages):
    """Generate a histogram for a given coadd maskedImage

    Inputs:
    - coadd: a chiSquared coadd MaskedImage
    - numBins: number of bins for histogram
    - numImages: number of images that went into the coadd

    Returns:
    - histX: x values for histogram of coadd data (counts)
    - histY: y values for histogram of coadd data (number of pixels)
    - chiSqY: chi squared distribution values corresponding to histX
    """
    coaddData = coadd.getImage().getArray()
    # undo normalization
    coaddData *= float(numImages)
    # get rid of nans and infs
    goodData = np.extract(np.isfinite(coaddData.flat), coaddData.flat)
    goodData = np.extract(goodData < 50, goodData)

    # compute histogram
    histY, binEdges = np.histogram(goodData, bins=numBins)
    histX = binEdges[0:-1]
    histY = np.array(histY, dtype=float)  # convert from int to float
    histY /= histY.sum()

    # compute chiSq probability distribution; chi squared order = numImages
    chiSqY = np.power(histX, (numImages / 2.0) - 1) * np.exp(-histX / 2.0)
    chiSqY /= chiSqY.sum()

    return (histX, histY, chiSqY)
Example #17
def sub_arr(array, lim, con_array = None, min=None, max=None, boundaries=True):
    """Purpose: Extract sub array of values between min and max limits
    arguments:
     array          var     array to take subset of
     lim            var     array containing [min, max]
     boundaries     bool    include boundaries ie <= and >=
    keywords:
     con_array      var     condition array to apply min/max check on
    Outputs:
     array of values in array with indices satisfying min < con_array < max
    Call example: 
     function()

    TODO: implement only using max or min
    """
    array = check_array(array) # check array is a numpy array

    assert lim[1] >= lim[0], 'min > max'

    if con_array is None:  # If no separate array is supplied, use the same array for min/max
        con_array = array
    else: 
        assert np.size(con_array) == np.size(array), 'WARNING: size(con_array) != size(array)'

    if boundaries == True:
        sub = np.extract( (con_array>=lim[0]) * (con_array<=lim[1]), array)
    else:
        sub = np.extract( (con_array>lim[0]) * (con_array<lim[1]), array)
    return sub
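
A hedged usage sketch (assuming the project's `check_array` helper simply returns its argument as a numpy array):

import numpy as np

a = np.arange(5)                             # values to subset
b = np.array([0.1, 0.3, 0.5, 0.9, 0.7])      # condition array, same length as a
print(sub_arr(a, [0.2, 0.8], con_array=b))   # -> [1 2 4]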
Example #18
def chi_profs(bulge_prof, disk_prof, mask, weight, resid, hrad, 
              smoothed = False, smooth_scale = 2.0):
    galprof = bulge_prof + disk_prof
    if smoothed:
        resid = filters.gaussian_filter(resid**2, smooth_scale)
        weight = filters.gaussian_filter(weight**2, smooth_scale)
    else:
        resid = resid**2
        weight = weight**2

        
    new_mask = np.where(galprof > np.max(galprof)/100.0, 1,0)    
    galprof = image_info(galprof, mask=new_mask)
    profile = image_info(resid/weight, mask = mask, x_ctr = galprof.x_ctr, y_ctr = galprof.y_ctr, ell = galprof.ba, pa= galprof.pa, zoom =-1 )
    profile.profile()

    rads = np.array(profile.rads)*0.396/hrad
    profs = np.array(profile.prof)
    cum_profs = np.array(profile.aperflux)/np.array(profile.included_pix)
    
    cum_profs = np.extract(rads>0, cum_profs)
    profs = np.extract(rads>0, profs)
    rads = np.extract(rads>0, rads)

    x = np.arange(0.1, 4.0,0.1)
    s = interp.InterpolatedUnivariateSpline(rads,cum_profs)
    ynew = s(x)
    
    return x, ynew 
Example #19
def extract_velocity_cells_non_sphere( vlsvReader, cellid, origin, radius ):
   ''' Retrieves the velocity cells within a given sphere and returns the population outside the given sphere
       :param vlsvReader:         Some VlsvFile with a file open
       :param cellid:             The cellid whose rho to calculate
       :param origin:             Origin for the sphere
       :param radius:             Radius for the sphere
       :returns: Non backstream velocity cells and their avgs values as [vcellids, avgs]
   '''
   # Read the velocity cells:
   velocity_cell_data = vlsvReader.read_velocity_cells(cellid)
   # Get cells:
   vcellids = velocity_cell_data.keys()
   # Get avgs data:
   avgs = velocity_cell_data.values()
   # Get a list of velocity coordinates shifted by the solar wind bulk velocity:
   origin = np.array(origin)
   v = vlsvReader.get_velocity_cell_coordinates(vcellids) - origin
   # Get squared radii (sum of squared velocity components):
   radiuses = np.sum(np.abs(v)**2,axis=-1)
   # Check radius condition
   radius2 = radius**2
   condition = (radiuses > radius2)
   # Get the velocity cells of nonsphere
   vcellids_nonsphere = np.extract(condition, vcellids)
   # Get the avgs
   avgs_nonsphere = np.extract(condition, avgs)
   # Return
   return [vcellids_nonsphere, avgs_nonsphere]
Example #20
def dataExtraction(data = 'train', class1 = 1, class0 = 0):
    import pickle, gzip
    # Load the dataset
    f = gzip.open('mnist.pkl.gz', 'rb')
    train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
    f.close()
    
    if data == 'test':
        [data, labels] = test_set
    else:
        data   = np.concatenate((train_set[0], valid_set[0]), axis = 0)
        labels = np.concatenate((train_set[1], valid_set[1]), axis = 0)
        
    y1 = np.extract(labels == class1, labels)
    X1 = data[labels == class1, :]
    
    y0 = np.extract(labels == class0, labels)
    X0 = data[labels == class0, :]

    y = np.concatenate((y1, y0), axis = 0)
    X = np.concatenate((X1, X0), axis = 0)
    
    #X = (X - np.mean(X, axis = 0)) / (1 + np.std(X, axis = 0)) # Data Normalization
    y[y == class1] = 1
    y[y == class0] = 0
    y = np.reshape(y, (np.shape(X)[0], 1))
    return y, X
Example #21
def calculateStaticBiases (data):
    ## calculate global bias
    # calculate mean of all the ratings
    globalBias = numpy.zeros([1,1])
    globalBias[0,0] = numpy.mean(data[:,2])
    numpy.savetxt(globalBiasResult,globalBias,delimiter=';')
    
    # prepare the users' biases matrix    
    numberOfUsers = len(numpy.unique(data[:,0]))
    userMatIdBias = numpy.zeros([numberOfUsers, 2])
    
    # prepare the items' biases matrix
    numberOfItems = len(numpy.unique(data[:,1]))
    itemMatIdBias = numpy.zeros([numberOfItems, 2])
        
    ## calculate user bias    
    # calculate mean rating for the user in the condition
    for i, usrIndex in enumerate(numpy.unique(data[:,0])):
        condition = data[:,0]==usrIndex
        f=numpy.mean(numpy.extract(condition, data[:,2]))
        f=f-globalBias[0,0]
        userMatIdBias[i,:] = [usrIndex,f]
        numpy.savetxt(userBiasesResult,userMatIdBias,delimiter=';')
       
    ## calculate item bias 
    # calculate mean rating for the item in the condition
    for i, itmIndex in enumerate(numpy.unique(data[:,1])):
        condition = data[:,1]==itmIndex
        f=numpy.mean(numpy.extract(condition, data[:,2]))
        f = f-globalBias[0,0]
        itemMatIdBias[i,:] = [itmIndex,f]
        numpy.savetxt(itemBiasesResult,itemMatIdBias,delimiter=';')
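
The per-user bias step in isolation, on a made-up ratings array (a sketch; columns are user id, item id, rating, and the file output of the function above is omitted):

import numpy

data = numpy.array([[1, 10, 4.0],
                    [1, 11, 2.0],
                    [2, 10, 5.0]])
globalBias = numpy.mean(data[:, 2])
for usrIndex in numpy.unique(data[:, 0]):
    condition = data[:, 0] == usrIndex
    userBias = numpy.mean(numpy.extract(condition, data[:, 2])) - globalBias
    print(usrIndex, userBias)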
Example #22
def normalize_player_name(img_name, debug=False):
    img_name_w = matcher.MM_WHITE(sat=(0, 96), visibility=(48, 255))(img_name)

    img_name_x_hist = np.extract(np.sum(img_name_w, axis=0) > 128, np.arange(img_name_w.shape[1]))

    img_name_y_hist = np.extract(np.sum(img_name_w, axis=1) > 128, np.arange(img_name_w.shape[0]))

    if (len(img_name_x_hist) == 0) or (len(img_name_y_hist) == 0):
        # In some cases, we can't find any pixels.
        return None

    img_name_left = np.min(img_name_x_hist)
    img_name_right = np.max(img_name_x_hist)

    img_name_top = np.min(img_name_y_hist)
    img_name_bottom = np.max(img_name_y_hist)

    # Cropping error? should be handled gracefully.
    if not (img_name_left < img_name_right):
        return None

    img_name_w = img_name_w[img_name_top:img_name_bottom, img_name_left:img_name_right]

    img_name_w_norm = np.zeros((15, 250), dtype=np.uint8)
    img_name_w_norm[:, 0 : img_name_w.shape[1]] = cv2.resize(img_name_w, (img_name_w.shape[1], 15))

    if debug:
        print(img_name_w_norm.shape)
        cv2.imshow("name", img_name_w_norm)
        cv2.waitKey(1)

    return img_name_w_norm
Example #23
def boxoverlap(regions_a, region_b, thre):
    # (x1,y1) top-left coord, (x2,y2) bottom-right coord, (w,h) size
    TP=NP=0;
    TP_all=NP_all=0
    N=len(region_b);
    
    for (xb,yb,wb,hb) in region_b:
        x1=np.maximum(regions_a[:,0],xb);
        y1=np.maximum(regions_a[:,1],yb);
        x2=np.minimum((regions_a[:,2]+regions_a[:,0]),(xb+wb));
        y2=np.minimum((regions_a[:,3]+regions_a[:,1]),(yb+hb));
        print x1,y1,x2,y2
        w=x2-x1+1;
        h=y2-y1+1;
        inter=w*h;
        aarea=(regions_a[:,2]+1)*(regions_a[:,3]+1);
        barea=(wb+1)*(hb+1);

        #intersection over union overlap
        o=inter/(aarea+float(barea)-inter);
        
        #set invalid entries to 0 overlap
        o[w<=0]=0
        o[h<=0]=0
        TP=len(np.extract(o>=thre, o))
        NP=len(np.extract(o<thre, o))
        TP_all=TP_all+TP
        
    NP_all=NP-TP_all
    if NP_all<0:
        NP_all=0
        
    return TP_all, NP_all, N; 
Example #24
	def computeMcNemarSignificance(self, truth, predictions1, predictions2):
		condition = (truth == 1)
		truth = numpy.extract(condition, truth)
		predictions1 = numpy.extract(condition, predictions1)
		predictions2 = numpy.extract(condition, predictions2)
	
		evals1 = (predictions1 == truth)
		evals2 = (predictions2 == truth)
		
		# Misclassified by the first model only: c01.
		# Misclassified by the second model only: c10.
		c01, c10 = 0, 0
	
		for i, eval1 in enumerate(evals1):
			eval2 = evals2[i]
			if eval1 == 0 and eval2 == 1:
				c01 += 1
			if eval1 == 1 and eval2 == 0:
				c10 += 1
		
		if c01 + c10 < 20:
			print "Unreliable conclusion:", c01, c10
			return 0.0
		else:
			return math.pow(abs(c01 - c10) - 1, 2) / (c01 + c10)
Example #25
    def running_ave(self, rad_pix, good_image, edges):
        rad_out = []
        prof_out = []
        proferr_out = []
        aperflux = []
        included_pix = []

        for curr_edge in np.arange(len(edges)-1):
            #print 'edges ', edges[curr_edge],edges[curr_edge+1]
            tmp_im = np.extract(rad_pix < edges[curr_edge+1], good_image)
            tmp_rad = np.extract(rad_pix < edges[curr_edge+1], rad_pix)

            #print 'rads1'
            #print tmp_rad[:10]
            #raw_input()

            if len(tmp_rad)>0:
                aper_tmp = np.sum(tmp_im)
                inc_tmp = float(tmp_im.size)
            
                tmp_im = np.extract(tmp_rad >= edges[curr_edge], tmp_im)
                tmp_rad = np.extract(tmp_rad >= edges[curr_edge], tmp_rad)
                
                #print 'rads2'
                #print tmp_rad[:10]
                #raw_input()

                if len(tmp_rad) > 0:
                    rad_out.append(np.mean(tmp_rad))
                    prof_out.append(np.mean(tmp_im))
                    proferr_out.append(np.std(tmp_im))#/(tmp_im.size-1))
                    aperflux.append(aper_tmp)
                    included_pix.append(inc_tmp)

        return rad_out, prof_out,proferr_out, aperflux, included_pix
Example #26
def rain_split(qlwell, channel_num=0, threshold=None, pct_boundary=0.3, split_all_peaks=False):
    """
    Splits between rain and non-rain.  If you want the well's auto threshold to be used,
    use None as a threshold parameter (the default).
    If you do not want a threshold to be calculated, use '0'. (little unclear from the spec)

    Returns tuple (rain, non-rain)
    """
    if threshold is None:
        threshold = qlwell.channels[channel_num].statistics.threshold
    
    ok_peaks = accepted_peaks(qlwell)
    prain, rain, nrain, p_thresh, mh_thresh, ml_thresh, l_thresh = \
        rain_pvalues_thresholds(ok_peaks, channel_num=channel_num, threshold=threshold, pct_boundary=pct_boundary)

    if split_all_peaks:
        peaks = qlwell.peaks
    else:
        peaks = ok_peaks
    # this would be useful as a standalone, but for efficiency's sake will cut out for now        
    rain_condition_arr = [channel_amplitudes(peaks, channel_num) > p_thresh]
    if mh_thresh and ml_thresh:
        rain_condition_arr.append(np.logical_and(channel_amplitudes(peaks, channel_num) > ml_thresh,
                                              channel_amplitudes(peaks, channel_num) < mh_thresh))
    rain_condition_arr.append(channel_amplitudes(peaks, channel_num) < l_thresh)
    rain_condition = reduce(np.logical_or, rain_condition_arr)
    nonrain_condition = np.logical_not(rain_condition)

    rain = np.extract(rain_condition, peaks)
    nonrain = np.extract(nonrain_condition, peaks)
    return rain, nonrain
Example #27
def gap_rain(qlwell, channel_num=0, threshold=None, pct_boundary=0.3, gap_size=10000):
    """
    Return the rain in the gaps between non-rain droplets.
    """
    rain, nonrain = rain_split(qlwell,
                               channel_num=channel_num,
                               threshold=threshold,
                               pct_boundary=pct_boundary)
    
    # ok, now identify the gaps in the gates.
    times = peak_times(nonrain)
    if nonrain is None or len(nonrain) < 2:
        return np.ndarray([0],dtype=peak_dtype(2))
    
    intervals = np.ediff1d(times, to_begin=0, to_end=0)
    big_intervals = intervals > gap_size

    # find beginning of gaps with extract
    beginnings = np.extract(big_intervals[1:], times)
    ends = np.extract(big_intervals[:-1], times)

    gap_intervals = zip(beginnings, ends)
    gap_intervals.insert(0, (0, times[0]))
    gap_intervals.append((times[-1], times[-1]*100))
    
    # count the rain in the intervals
    gap_drops = np.extract(reduce(np.logical_or, [np.logical_and(peak_times(rain) > b,
                                                                 peak_times(rain) < e) for b, e in gap_intervals]),
                           rain)
    return gap_drops
Example #28
def auroc(y_prob, y_true):
    #threshold = [0, 0.2, 0.4, 0.6, 0.7, 0.8, 0.9, 0.99, 0.9999, 0.99999, 0.999999, 1.01]
    threshold = np.sort(np.unique(y_prob))
    threshold[0] = 0
    threshold[threshold.shape[0]-1] = 1
    tpr = np.empty_like(threshold)
    fpr = np.empty_like(threshold)
    for i in range(0, len(threshold)):
        #print threshold[i]
        predicted = np.empty_like(y_prob)
        for j in range(0,y_prob.shape[0]):
            if threshold[len(threshold)-1-i] != 1:
                predicted[j] = int(y_prob[j] >= threshold[len(threshold)-1-i])
            else:
                predicted[j] = 0
        a = np.extract(y_true == 1, predicted-y_true)
        tpr[i] = float(a.shape[0] - np.count_nonzero(a))/np.count_nonzero(y_true)
        b = np.extract(y_true == 0, predicted-y_true)
        fpr[i] = np.count_nonzero(b)/float(y_true.shape[0]-np.count_nonzero(y_true))
    #roc = interp1d(fpr, tpr, kind='linear')
    roc_auc = trapz(tpr, fpr)
    #print 'tpr', tpr
    #print 'fpr', fpr
    #print 'roc_auc', roc_auc
    return tpr, fpr, roc_auc
Example #29
def explore_transformation(dirty_dir, clean_dir, num_stds=0):
	images = load_images(dirty_dir)
	cleaned_images = load_images(clean_dir)
	for key in images.keys():
		image = images[key]
		image_c = third_pass_filter(image)
		cv2.imshow('original', image)
		cv2.moveWindow('original', 0, 0)
		cv2.imshow('cleaned by me', image_c)
		cv2.moveWindow('cleaned by me', 500, 0)
		them = cleaned_images[key]
		cv2.imshow('clean', them)
		cv2.moveWindow('clean', 500, 300)

		consider_white = 200
		important_me = np.extract(image_c.flatten() < consider_white, image_c.flatten())
		important_them = np.extract(them.flatten() < consider_white, them.flatten())

		bins = 20
		plt.hist(important_them, bins, label='them')
		plt.hist(important_me, bins, label='me')
		plt.legend(loc='upper right')
		#plt.show()

		cv2.waitKey(0)
		plt.close()
		cv2.destroyAllWindows()
Example #30
def plot_dec_boundary(theta, x, y, mapFeat=False):
    theta = np.matrix(theta).T
    if mapFeat is True:
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)
    else:
        u = np.linspace(np.min(x[:,1]), np.max(x[:,1]), 50)
        v = np.linspace(np.min(x[:,2]), np.max(x[:,2]), 50)

    z = np.zeros((u.size, v.size))

    #% Evaluate z = theta*x over the grid
    for i in range(0,u.size):
        for j in range(0,v.size):
            if mapFeat is True:
                x_m = mapfeat(u[i], v[j]) 
            else:
                #x_m = np.matrix([1,u[i],v[j],u[i]**2,u[i]**4])
                x_m = np.matrix([1,u[i],v[j],u[i]**2])
            z[i,j] = x_m * theta
    z = z.T # important to transpose z before calling contour
    print z, z.shape

    # Plot z = 0
    # Notice you need to specify the range [0, 0]
    plt.figure()
    #plt.scatter(x[:, 1], x[:, 2], c=y, cmap=plt.cm.Paired)
    plt.scatter(np.extract(y==1, x[:, 1]), np.extract(y==1, x[:, 2]), c='b', marker='o', label='admitted')
    plt.scatter(np.extract(y==0, x[:, 1]), np.extract(y==0, x[:, 2]), c='r', marker='o', label='declined')
    #ax = fig.add_subplot(111)
    plt.xlabel('Test 1 scores')
    plt.ylabel('Test 2 scores')
    plt.legend()
    plt.contour(u, v, z, [0,0], linewidth = 2, cmap=plt.cm.Paired)
    plt.show()
Example #31
def versuch_auswerten(versuch_werte, versuch_name, header):

    # Convert the values into numeric form
    delta_l_values = pd.to_numeric(versuch_werte.delta_l_t).values
    delta_r_values = pd.to_numeric(versuch_werte.delta_r_t).values
    delta_m_values = pd.to_numeric(versuch_werte.delta_m_t).values
    geschwindigkeit_l_values = pd.to_numeric(
        versuch_werte.geschwindigkeit_l).values
    geschwindigkeit_r_values = pd.to_numeric(
        versuch_werte.geschwindigkeit_r).values
    geschwindigkeit_m_values = pd.to_numeric(
        versuch_werte.geschwindigkeit_m).values
    tendenz_l_values = pd.to_numeric(versuch_werte.tendenz_l).values
    tendenz_r_values = pd.to_numeric(versuch_werte.tendenz_r).values
    tendenz_m_values = pd.to_numeric(versuch_werte.tendenz_m).values
    blick_l_x_values = pd.to_numeric(versuch_werte.blick_l_x).values
    blick_l_y_values = pd.to_numeric(versuch_werte.blick_l_y).values
    blick_r_x_values = pd.to_numeric(versuch_werte.blick_r_x).values
    blick_r_y_values = pd.to_numeric(versuch_werte.blick_r_y).values
    sacc_m_values = pd.to_numeric(versuch_werte.sacc_m).values
    sacc_l_values = pd.to_numeric(versuch_werte.sacc_l).values
    sacc_r_values = pd.to_numeric(versuch_werte.sacc_r).values

    # Compute mean values
    # No exception handling here, since an empty array makes np.mean() return nan rather than raise an exception
    if delta_l_values[np.nonzero(delta_l_values)].size == 0:
        mean_delta_l = -1
    else:
        mean_delta_l = np.mean(delta_l_values[np.nonzero(delta_l_values)])
    if delta_r_values[np.nonzero(delta_r_values)].size == 0:
        mean_delta_r = -1
    else:
        mean_delta_r = np.mean(delta_r_values[np.nonzero(delta_r_values)])
    if delta_m_values[np.nonzero(delta_m_values)].size == 0:
        mean_delta_m = -1
    else:
        mean_delta_m = np.mean(delta_m_values[np.nonzero(delta_m_values)])
    if geschwindigkeit_l_values[np.nonzero(
            geschwindigkeit_l_values)].size == 0:
        mean_geschwindigkeit_l = -1
    else:
        mean_geschwindigkeit_l = np.mean(
            geschwindigkeit_l_values[np.nonzero(geschwindigkeit_l_values)])
    if geschwindigkeit_r_values[np.nonzero(
            geschwindigkeit_r_values)].size == 0:
        mean_geschwindigkeit_r = -1
    else:
        mean_geschwindigkeit_r = np.mean(
            geschwindigkeit_r_values[np.nonzero(geschwindigkeit_r_values)])
    if geschwindigkeit_m_values[np.nonzero(
            geschwindigkeit_m_values)].size == 0:
        mean_geschwindigkeit_m = -1
    else:
        mean_geschwindigkeit_m = np.mean(
            geschwindigkeit_m_values[np.nonzero(geschwindigkeit_m_values)])

    header = np.append(header, [
        versuch_name + '_mean_delta_l', versuch_name + '_mean_delta_r',
        versuch_name + '_mean_delta_m', versuch_name +
        '_mean_geschwindigkeit_l', versuch_name + '_mean_geschwindigkeit_r',
        versuch_name + '_mean_geschwindigkeit_m'
    ])

    # Compute maxima
    try:
        max_delta_l = np.max(delta_l_values[np.nonzero(delta_l_values)])
    except ValueError:
        max_delta_l = -1
    try:
        max_delta_r = np.max(delta_r_values[np.nonzero(delta_r_values)])
    except ValueError:
        max_delta_r = -1
    try:
        max_delta_m = np.max(delta_m_values[np.nonzero(delta_m_values)])
    except ValueError:
        max_delta_m = -1
    try:
        max_geschwindigkeit_l = np.max(
            geschwindigkeit_l_values[np.nonzero(geschwindigkeit_l_values)])
    except ValueError:
        max_geschwindigkeit_l = -1
    try:
        max_geschwindigkeit_r = np.max(
            geschwindigkeit_r_values[np.nonzero(geschwindigkeit_r_values)])
    except ValueError:
        max_geschwindigkeit_r = -1
    try:
        max_geschwindigkeit_m = np.max(
            geschwindigkeit_m_values[np.nonzero(geschwindigkeit_m_values)])
    except ValueError:
        max_geschwindigkeit_m = -1

    header = np.append(header, [
        versuch_name + '_max_delta_l', versuch_name + '_max_delta_r',
        versuch_name + '_max_delta_m', versuch_name + '_max_geschwindigkeit_l',
        versuch_name + '_max_geschwindigkeit_r',
        versuch_name + '_max_geschwindigkeit_m'
    ])

    # Compute minima
    # Exception handling for the test subjects for whom only one eye was measured
    try:
        min_delta_l = np.min(delta_l_values[np.nonzero(delta_l_values)])
    except ValueError:
        min_delta_l = -1
    try:
        min_delta_r = np.min(delta_r_values[np.nonzero(delta_r_values)])
    except ValueError:
        min_delta_r = -1
    try:
        min_delta_m = np.min(delta_m_values[np.nonzero(delta_m_values)])
    except ValueError:
        min_delta_m = -1
    try:
        min_geschwindigkeit_l = np.min(
            geschwindigkeit_l_values[np.nonzero(geschwindigkeit_l_values)])
    except ValueError:
        min_geschwindigkeit_l = -1
    try:
        min_geschwindigkeit_r = np.min(
            geschwindigkeit_r_values[np.nonzero(geschwindigkeit_r_values)])
    except ValueError:
        min_geschwindigkeit_r = -1
    try:
        min_geschwindigkeit_m = np.min(
            geschwindigkeit_m_values[np.nonzero(geschwindigkeit_m_values)])
    except ValueError:
        min_geschwindigkeit_m = -1

    header = np.append(header, [
        versuch_name + '_min_delta_l', versuch_name + '_min_delta_r',
        versuch_name + '_min_delta_m', versuch_name + '_min_geschwindigkeit_l',
        versuch_name + '_min_geschwindigkeit_r',
        versuch_name + '_min_geschwindigkeit_m'
    ])

    # Compute standard deviations
    if delta_l_values[np.nonzero(delta_l_values)].size == 0:
        std_delta_l = -1
    else:
        std_delta_l = np.std(delta_l_values[np.nonzero(delta_l_values)])
    if delta_r_values[np.nonzero(delta_r_values)].size == 0:
        std_delta_r = -1
    else:
        std_delta_r = np.std(delta_r_values[np.nonzero(delta_r_values)])
    if delta_m_values[np.nonzero(delta_m_values)].size == 0:
        std_delta_m = -1
    else:
        std_delta_m = np.std(delta_m_values[np.nonzero(delta_m_values)])
    if geschwindigkeit_l_values[np.nonzero(
            geschwindigkeit_l_values)].size == 0:
        std_geschwindigkeit_l = -1
    else:
        std_geschwindigkeit_l = np.std(
            geschwindigkeit_l_values[np.nonzero(geschwindigkeit_l_values)])
    if geschwindigkeit_r_values[np.nonzero(
            geschwindigkeit_r_values)].size == 0:
        std_geschwindigkeit_r = -1
    else:
        std_geschwindigkeit_r = np.std(
            geschwindigkeit_r_values[np.nonzero(geschwindigkeit_r_values)])
    if geschwindigkeit_m_values[np.nonzero(
            geschwindigkeit_m_values)].size == 0:
        std_geschwindigkeit_m = -1
    else:
        std_geschwindigkeit_m = np.std(
            geschwindigkeit_m_values[np.nonzero(geschwindigkeit_m_values)])

    header = np.append(header, [
        versuch_name + '_standardabweichung_delta_l',
        versuch_name + '_standardabweichung_delta_r',
        versuch_name + '_standardabweichung_delta_m',
        versuch_name + '_standardabweichung_geschwindigkeit_l',
        versuch_name + '_standardabweichung_geschwindigkeit_r',
        versuch_name + '_standardabweichung_geschwindigkeit_m'
    ])

    # Compute variances
    if delta_l_values[np.nonzero(delta_l_values)].size == 0:
        var_delta_l = -1
    else:
        var_delta_l = np.var(delta_l_values[np.nonzero(delta_l_values)])
    if delta_r_values[np.nonzero(delta_r_values)].size == 0:
        var_delta_r = -1
    else:
        var_delta_r = np.var(delta_r_values[np.nonzero(delta_r_values)])
    if delta_m_values[np.nonzero(delta_m_values)].size == 0:
        var_delta_m = -1
    else:
        var_delta_m = np.var(delta_m_values[np.nonzero(delta_m_values)])
    if geschwindigkeit_l_values[np.nonzero(
            geschwindigkeit_l_values)].size == 0:
        var_geschwindigkeit_l = -1
    else:
        var_geschwindigkeit_l = np.var(
            geschwindigkeit_l_values[np.nonzero(geschwindigkeit_l_values)])
    if geschwindigkeit_r_values[np.nonzero(
            geschwindigkeit_r_values)].size == 0:
        var_geschwindigkeit_r = -1
    else:
        var_geschwindigkeit_r = np.var(
            geschwindigkeit_r_values[np.nonzero(geschwindigkeit_r_values)])
    if geschwindigkeit_m_values[np.nonzero(
            geschwindigkeit_m_values)].size == 0:
        var_geschwindigkeit_m = -1
    else:
        var_geschwindigkeit_m = np.var(
            geschwindigkeit_m_values[np.nonzero(geschwindigkeit_m_values)])

    header = np.append(header, [
        versuch_name + '_varianz_delta_l', versuch_name + '_varianz_delta_r',
        versuch_name + '_varianz_delta_m',
        versuch_name + '_varianz_geschwindigkeit_l',
        versuch_name + '_varianz_geschwindigkeit_r',
        versuch_name + '_varianz_geschwindigkeit_m'
    ])

    # Evaluate the tendency
    condition_voraus_l = np.equal(tendenz_l_values, 1)
    num_voraus_l = len(np.extract(condition_voraus_l, tendenz_l_values))
    condition_voraus_r = np.equal(tendenz_r_values, 1)
    num_voraus_r = len(np.extract(condition_voraus_r, tendenz_r_values))
    condition_voraus_m = np.equal(tendenz_m_values, 1)
    num_voraus_m = len(np.extract(condition_voraus_m, tendenz_m_values))
    condition_hinter_l = np.equal(tendenz_l_values, -1)
    num_hinter_l = len(np.extract(condition_hinter_l, tendenz_l_values))
    condition_hinter_r = np.equal(tendenz_r_values, -1)
    num_hinter_r = len(np.extract(condition_hinter_r, tendenz_r_values))
    condition_hinter_m = np.equal(tendenz_m_values, -1)
    num_hinter_m = len(np.extract(condition_hinter_m, tendenz_m_values))

    # -100 does not stand for a computed value but for 'not available'.
    if num_voraus_l == 0 and num_hinter_l == 0:
        tendenz_l = -100
    else:
        if num_voraus_l > num_hinter_l:
            tendenz_l = 1
        else:
            if num_hinter_l > num_voraus_l:
                tendenz_l = -1
            else:
                tendenz_l = 0

    if num_voraus_r == 0 and num_hinter_r == 0:
        tendenz_r = -100
    else:
        if num_voraus_r > num_hinter_r:
            tendenz_r = 1
        else:
            if num_hinter_r > num_voraus_r:
                tendenz_r = -1
            else:
                tendenz_r = 0

    if num_voraus_m == 0 and num_hinter_m == 0:
        tendenz_m = -100
    else:
        if num_voraus_m > num_hinter_m:
            tendenz_m = 1
        else:
            if num_hinter_m > num_voraus_m:
                tendenz_m = -1
            else:
                tendenz_m = 0

    header = np.append(header, [
        versuch_name + '_tendenz_l', versuch_name + '_tendenz_r',
        versuch_name + '_tendenz_m'
    ])

    # Compute the covariance between the left and the right eye
    cov_x = np.cov(blick_l_x_values, blick_r_x_values)[0][1]
    cov_y = np.cov(blick_l_y_values, blick_r_y_values)[0][1]

    header = np.append(header, [
        versuch_name + '_Kovarianz_blick_x',
        versuch_name + '_Kovarianz_blick_y'
    ])

    verhaeltnis_l_x_da = blick_l_x_values[np.nonzero(
        blick_l_x_values)].size / blick_l_x_values.size
    verhaeltnis_l_y_da = blick_l_y_values[np.nonzero(
        blick_l_y_values)].size / blick_l_y_values.size
    verhaeltnis_r_x_da = blick_r_x_values[np.nonzero(
        blick_r_x_values)].size / blick_r_x_values.size
    verhaeltnis_r_y_da = blick_r_y_values[np.nonzero(
        blick_r_y_values)].size / blick_r_y_values.size

    sacc_m = np.sum(sacc_m_values)
    sacc_l = np.sum(sacc_l_values)
    sacc_r = np.sum(sacc_r_values)
    if versuch_name == 'Horizontal' or versuch_name == 'Liegende_8_schnell':
        sacc_rate_m = sacc_m / (999 * 4)
        sacc_rate_l = sacc_l / (999 * 4)
        sacc_rate_r = sacc_r / (999 * 4)
    else:
        sacc_rate_m = sacc_m / (999 * 5)
        sacc_rate_l = sacc_l / (999 * 5)
        sacc_rate_r = sacc_r / (999 * 5)

    header = np.append(header, [
        versuch_name + '_links_verhaeltnis_x', versuch_name +
        '_links_verhaeltnis_y', versuch_name + '_rechts_verhaeltnis_x',
        versuch_name + '_rechts_verhaeltnis_y', versuch_name + '_sacc_m',
        versuch_name + '_sacc_rate_m', versuch_name + '_sacc_l', versuch_name +
        '_sacc_rate_l', versuch_name + '_sacc_r', versuch_name + '_sacc_rate_r'
    ])

    yield [[
        mean_delta_l, mean_delta_r, mean_delta_m, mean_geschwindigkeit_l,
        mean_geschwindigkeit_r, mean_geschwindigkeit_m, max_delta_l,
        max_delta_r, max_delta_m, max_geschwindigkeit_l, max_geschwindigkeit_r,
        max_geschwindigkeit_m, min_delta_l, min_delta_r, min_delta_m,
        min_geschwindigkeit_l, min_geschwindigkeit_r, min_geschwindigkeit_m,
        std_delta_l, std_delta_r, std_delta_m, std_geschwindigkeit_l,
        std_geschwindigkeit_r, std_geschwindigkeit_m, var_delta_l, var_delta_r,
        var_delta_m, var_geschwindigkeit_l, var_geschwindigkeit_r,
        var_geschwindigkeit_m, tendenz_l, tendenz_r, tendenz_m, cov_x, cov_y,
        verhaeltnis_l_x_da, verhaeltnis_l_y_da, verhaeltnis_r_x_da,
        verhaeltnis_r_y_da, sacc_m, sacc_rate_m, sacc_l, sacc_rate_l, sacc_r,
        sacc_rate_r
    ]]
    yield header
Example #32
def categorical_crossentropy(preds, labels):
    return np.mean(-np.log(np.extract(labels, preds)))
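
Here the one-hot `labels` array acts as the condition, so `np.extract` picks out the predicted probability of the true class for each sample. A small sketch (assuming numpy is imported as np):

import numpy as np

preds = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.8, 0.1]])
labels = np.array([[1, 0, 0],
                   [0, 1, 0]])
print(categorical_crossentropy(preds, labels))   # mean of -log(0.7) and -log(0.8), about 0.29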
Example #33
def sawtooth(t, width=1):
    """
    Return a periodic sawtooth or triangle waveform.

    The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
    interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
    ``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].

    Note that this is not band-limited.  It produces an infinite number
    of harmonics, which are aliased back and forth across the frequency
    spectrum.

    Parameters
    ----------
    t : array_like
        Time.
    width : array_like, optional
        Width of the rising ramp as a proportion of the total cycle.
        Default is 1, producing a rising ramp, while 0 produces a falling
        ramp.  `width` = 0.5 produces a triangle wave.
        If an array, causes wave shape to change over time, and must be the 
        same length as t.

    Returns
    -------
    y : ndarray
        Output array containing the sawtooth waveform.

    Examples
    --------
    A 5 Hz waveform sampled at 500 Hz for 1 second:

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(0, 1, 500)
    >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))

    """
    t, w = asarray(t), asarray(width)
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
    if t.dtype.char in 'fFdD':
        ytype = t.dtype.char
    else:
        ytype = 'd'
    y = zeros(t.shape, ytype)

    # width must be between 0 and 1 inclusive
    mask1 = (w > 1) | (w < 0)
    place(y, mask1, nan)

    # take t modulo 2*pi
    tmod = mod(t, 2 * pi)

    # on the interval 0 to width*2*pi function is
    #  tmod / (pi*w) - 1
    mask2 = (1 - mask1) & (tmod < w * 2 * pi)
    tsub = extract(mask2, tmod)
    wsub = extract(mask2, w)
    place(y, mask2, tsub / (pi * wsub) - 1)

    # on the interval width*2*pi to 2*pi function is
    #  (pi*(w+1)-tmod) / (pi*(1-w))

    mask3 = (1 - mask1) & (1 - mask2)
    tsub = extract(mask3, tmod)
    wsub = extract(mask3, w)
    place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
    return y
Example #34
    def do_error(self, line):
        """Override the error calculation for TTS

        The error is calculated as the vertical distance between theory points, in the current view,\
        calculated over all possible pairs of theory tables, when the theories overlap in the horizontal direction and\
        they correspond to files with the same Mw (if the parameters Mw2 and phi exist, their values are also
        used to classify the error). 1/2 of the error is added to each file.
        Report the error of the current theory on all the files.\n\
        File error is calculated as the mean square of the residual, averaged over all calculated points in the shifted tables.\n\
        Total error is the mean square of the residual, averaged over all points considered in all files.

        """
        total_error = 0
        npoints = 0
        view = self.parent_dataset.parent_application.current_view
        nfiles = len(self.parent_dataset.files)
        file_error = np.zeros(nfiles)
        file_points = np.zeros(nfiles, dtype=np.int)
        xth = []
        yth = []
        xmin = np.zeros((nfiles, view.n))
        xmax = np.zeros((nfiles, view.n))
        for i in range(nfiles):
            Filei = self.parent_dataset.files[i]
            xthi, ythi, success = view.view_proc(
                self.tables[Filei.file_name_short], Filei.file_parameters
            )
            # We need to sort arrays
            for k in range(view.n):
                x = xthi[:, k]
                p = x.argsort()
                xthi[:, k] = xthi[p, k]
                ythi[:, k] = ythi[p, k]
            xth.append(xthi)
            yth.append(ythi)

            xmin[i, :] = np.amin(xthi, 0)
            xmax[i, :] = np.amax(xthi, 0)

        # Mwset, Mw, Tdict = self.get_cases()
        MwUnique = {}
        for o in self.Mwset:
            MwUnique[o] = [0.0, 0]

        for i in range(nfiles):
            for j in range(i + 1, nfiles):
                if self.Mw[i] != self.Mw[j]:
                    continue
                for k in range(view.n):
                    condition = (xth[j][:, k] > xmin[i, k]) * (
                        xth[j][:, k] < xmax[i, k]
                    )
                    x = np.extract(condition, xth[j][:, k])
                    y = np.extract(condition, yth[j][:, k])
                    yinterp = interp(x, xth[i][:, k], yth[i][:, k])
                    error = np.sum((yinterp - y) ** 2)
                    npt = len(y)
                    total_error += error
                    npoints += npt
                    MwUnique[self.Mw[i]][0] += error
                    MwUnique[self.Mw[i]][1] += npt

        if line == "":
            # table='''<table border="1" width="100%">'''
            # table+='''<tr><th>Mw</th><th>Mw2</th><th>phi</th><th>phi2</th><th>Error</th><th># Pts.</th></tr>'''
            table = [
                [
                    "%-12s" % "Mw",
                    "%-12s" % "Mw2",
                    "%-12s" % "phi",
                    "%-12s" % "phi2",
                    "%-12s" % "Error",
                    "%-12s" % "# Pts.",
                ],
            ]
            p = list(MwUnique.keys())
            p.sort()
            for o in p:
                if MwUnique[o][1] > 0:
                    # table+='''<tr><td>%4g</td><td>%4g</td><td>%4g</td><td>%4g</td><td>%8.3g</td><td>(%5d)</td></tr>'''%(o[0], o[1], o[2], o[3], MwUnique[o][0] / MwUnique[o][1], MwUnique[o][1])
                    table.append(
                        [
                            "%-12.4g" % o[0],
                            "%-12.4g" % o[1],
                            "%-12.4g" % o[2],
                            "%-12.4g" % o[3],
                            "%-12.3g" % (MwUnique[o][0] / MwUnique[o][1]),
                            "%-12d" % MwUnique[o][1],
                        ]
                    )
                else:
                    # table+='''<tr><td>%4g</td><td>%4g</td><td>%4g</td><td>%4g</td><td>%s</td><td>(%5d)</td></tr>'''%(o[0], o[1], o[2], o[3], "-", MwUnique[o][1])
                    table.append(
                        [
                            "%-12.4g" % o[0],
                            "%-12.4g" % o[1],
                            "%-12.4g" % o[2],
                            "%-12.4g" % o[3],
                            "%-12s" % "-",
                            "%-12d" % MwUnique[o][1],
                        ]
                    )
            # table+='''</table><br>'''
            self.Qprint(table)
        if npoints > 0:
            total_error /= npoints
        else:
            total_error = 1e10
        if line == "":
            self.Qprint("<b>TOTAL ERROR</b>: %12.5g (%6d)<br>" % (total_error, npoints))
        return total_error
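
A minimal, self-contained sketch of the overlap-error pattern used above, with hypothetical curves and np.interp standing in for the interp call in the snippet:

import numpy as np

# two theory curves sampled on different x grids (hypothetical data)
x_i = np.linspace(0.0, 10.0, 50)
y_i = np.sin(x_i)
x_j = np.linspace(-2.0, 12.0, 80)
y_j = np.sin(x_j) + 0.01

# keep only the points of curve j that lie inside curve i's x-range
condition = (x_j > x_i.min()) & (x_j < x_i.max())
x_overlap = np.extract(condition, x_j)
y_overlap = np.extract(condition, y_j)

# interpolate curve i onto the overlapping x values and accumulate the error
y_interp = np.interp(x_overlap, x_i, y_i)
error = np.sum((y_interp - y_overlap) ** 2)
print(error / len(y_overlap), len(y_overlap))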
示例#35
0
    def calcularArea(
        self, clasesMuestras
    ):  # Computes the surface area of the land-cover map (raster or vector)
        error = True
        self.reultados.append("Inicia proceso de cálculo de áreas")
        idLayerQg = self.itemSuperficie.currentData()
        if (idLayerQg == 1):
            if str(os.path.splitext(
                    self.rutaCSVSuperficie)[1]) == ".tif" or str(
                        os.path.splitext(self.rutaCSVSuperficie)[1]) == ".TIF":
                layerArea = QgsRasterLayer(self.rutaCSVSuperficie,
                                           "aleatorios", "gdal")
            else:
                layerArea = QgsVectorLayer(self.rutaCSVSuperficie,
                                           "aleatorios", "ogr")
        else:
            layerArea = QgsProject.instance().mapLayer(str(idLayerQg))
        if str(layerArea.type()) == "QgsMapLayerType.VectorLayer":
            features = layerArea.getFeatures()
            clases = self.columClase.currentText()
            features = layerArea.getFeatures()
            field = [f[clases] for f in features]
            Clasesunicas = np.unique(np.array(field))
            matrizAreaClass = np.empty((len(Clasesunicas), 2)).astype(str)
            if (len(clasesMuestras) == len(Clasesunicas)):
                #print(sorted(np.array(clasesMuestras).astype(str)))
                #print(sorted(np.array(Clasesunicas).astype(str)))
                classMapa = sorted(np.array(Clasesunicas).astype(str))
                classMuestras = sorted(np.array(clasesMuestras).astype(str))
                x = np.array_equal(classMuestras, classMapa)
                if (x):
                    i = 0
                    self.reultados.append("Áreas por clase:")
                    for clase in Clasesunicas:
                        expr = QgsExpression("\"{}\"='{}'".format(
                            clases, clase))
                        clasesArea = layerArea.getFeatures(
                            QgsFeatureRequest(expr))
                        suma = 0
                        for areas in clasesArea:
                            ha = areas.geometry().area() / 10000
                            suma = suma + ha

                        matrizAreaClass[i][0] = str(clase)
                        matrizAreaClass[i][1] = str(round(float(str(suma)), 0))
                        self.reultados.append(
                            str(matrizAreaClass[i][0]) + ": \t" +
                            str(matrizAreaClass[i][1]))
                        i += 1
                else:
                    QMessageBox.information(
                        self, "Error",
                        "Hay clases que no coinciden en vector de muestras y mapa temático",
                        QMessageBox.Ok)
                    error = False
                    matrizAreaClass = 0
            else:
                QMessageBox.information(
                    self, "Error",
                    "Las clases del vector de muestras no tiene la misma cantidad de clases que el mapa temático",
                    QMessageBox.Ok)
                error = False
                matrizAreaClass = 0

        else:
            pathRaster = layerArea.dataProvider().dataSourceUri()
            data = gdal.Open(pathRaster, gdal.GA_ReadOnly)
            getProjec = data.GetProjection()
            encontrarMetros = getProjec.find("metre")

            if encontrarMetros != -1:
                clasesNew = []
                geotr = data.GetGeoTransform()
                pixel_area = abs(geotr[1] * geotr[5])
                band = data.GetRasterBand(1).ReadAsArray().astype(int)
                unicos = np.unique(band)
                clasesMuestras = clasesMuestras.astype(str)
                unicos = unicos.astype(str)
                for clase in clasesMuestras:
                    if clase in unicos:
                        clasesNew.append(clase)
                if (len(clasesMuestras) == len(clasesNew)):
                    classMapa = sorted(np.array(clasesNew).astype(str))
                    classMuestras = sorted(
                        np.array(clasesMuestras).astype(str))
                    if (np.array_equal(classMuestras, classMapa)):
                        i = 0
                        self.reultados.append("Áreas por clase:")
                        matrizAreaClass = np.empty(
                            (len(classMapa), 2)).astype(str)
                        for clase in clasesNew:
                            condition = (band == int(clase))
                            totalClase = np.extract(condition, band)
                            total = len(totalClase)
                            matrizAreaClass[i][0] = str(clase)
                            matrizAreaClass[i][1] = str(
                                round(float(str((total * pixel_area) / 10000)),
                                      0))
                            self.reultados.append(
                                str(matrizAreaClass[i][0]) + ": \t" +
                                str(matrizAreaClass[i][1]))
                            i += 1
                    else:
                        QMessageBox.information(
                            self, "Error",
                            "Hay clases que no coinciden en vector de muestras y mapa temático",
                            QMessageBox.Ok)
                        error = False
                        matrizAreaClass = 0
                else:
                    QMessageBox.information(
                        self, "Error",
                        "Las clases del vector de muestras no tiene la misma cantidad de clases que el mapa temático",
                        QMessageBox.Ok)
                    error = False
                    matrizAreaClass = 0
            else:
                QMessageBox.information(
                    self, "Error",
                    "El Raster debe de estar en unidades Metricas",
                    QMessageBox.Ok)
                error = False
                matrizAreaClass = 0

        return matrizAreaClass, error
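
A minimal numpy-only sketch of the per-class area idea above (hypothetical classified band and pixel size; the GDAL/QGIS handling is omitted):

import numpy as np

band = np.array([[1, 1, 2],
                 [2, 3, 3],
                 [3, 3, 1]])
pixel_area = 30.0 * 30.0  # m^2 per pixel (hypothetical)

# count the pixels of each class with np.extract and convert to hectares
for clase in np.unique(band):
    n_pixels = len(np.extract(band == clase, band))
    print(clase, round(n_pixels * pixel_area / 10000, 4), "ha")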
示例#36
0
def optimize(data, c_amp=1, c_pha=2, show_complex_plot=False):
    '''
    input:

        - data: data array in the form the dat_reader returns it:
          data = [[f1,f2,...,fn],...,[a1,a2,...,an],[ph1,ph2,...,phn],...]
          columns may be two-dimensional raw data entries to enable error extraction;
          averaged data is also accepted
        - c_amp, c_pha: column identifiers for amplitude and phase data in the data array
        - show_complex_plot: (bool) (optional, default: False) plots the data points in the complex plane
                             if True, only in case the data columns in data are 1D
        
    output:

        - numpy data array with x values, data values, and error entries
          of the form [[x1,x2,...,xn],[v1,v2,...,vn],[e1,e2,...,en]] if raw data was given
    '''

    #generate complex data array
    try:
        c_raw = np.array(data[c_amp]) * np.exp(1j * np.array(data[c_pha]))
    except IndexError:
        print('Bad column identifier...aborting.')
        return
    except ValueError:
        print('Faulty data input, dimension mismatch...aborting.')
        return

    if len(c_raw.shape) > 1:
        #calculate mean of complex data
        c = np.mean(c_raw, axis=0)
    else:  #given data is already averaged
        c = c_raw

    # point in the complex plane with maximum summed mutual distances
    s = np.zeros_like(np.abs(c))
    for i in range(len(c)):
        for p in c:
            s[i] += np.abs(p - c[i])
    cmax = np.extract(s == np.max(s), c)

    #calculate distances
    data_opt = np.abs(c - cmax)

    if len(c_raw.shape) > 1:
        #calculate errors in line direction

        #find maximum complex point in maximum distance
        d = 0
        for p in c:
            if np.abs(p - cmax) > d:
                d = np.abs(p - cmax)
                cdist = p
        #find unit vector in line direction
        vunit = (cdist - cmax) / np.abs(cdist - cmax)

        #calculate projected distances via dot product, projecting along the data direction
        #errors via std
        dist_proj = [0] * len(c)
        errs = np.zeros_like(c)
        for i, ci in enumerate(c_raw.T):  #for each iteration
            dist_proj[i] = [
                np.abs(
                    np.vdot([np.real(vunit), np.imag(vunit)], [
                        np.real(cr) - np.real(c[i]),
                        np.imag(cr) - np.imag(c[i])
                    ])) for cr in ci
            ]
            errs[i] = np.std(dist_proj[i]) / np.sqrt(len(dist_proj[i]))

    #normalize optimized data
    data_opt -= np.min(data_opt)
    maxv = np.max(data_opt)
    data_opt /= maxv
    if len(c_raw.shape) > 1:
        errs /= maxv

    #gauss plane plot
    if show_complex_plot:
        if len(c_raw.shape) > 1:
            plt.figure(figsize=(10, 13))
            ax1 = plt.subplot2grid((4, 1), (0, 0))
            ax2 = plt.subplot2grid((4, 1), (1, 0), rowspan=3)
            ax1.errorbar(data_opt,
                         np.zeros_like(data_opt),
                         xerr=errs,
                         color='blue',
                         fmt='o',
                         elinewidth=0.8,
                         capsize=5,
                         markersize=8,
                         ecolor='red')
            ax1.plot([0], [0], '*', color='red', markersize=20)
            prange = np.max(data_opt) - np.min(data_opt)
            ax1.set_xlim(
                np.min(data_opt) - 0.05 * prange,
                np.max(data_opt) + 0.05 * prange)
            ax2.plot(np.real(c), np.imag(c), '.')
            ax2.plot(np.real(c)[:10], np.imag(c)[:10], '.',
                     color='r')  #show first 10 data points in red
            ax2.plot(np.real(cmax),
                     np.imag(cmax),
                     '*',
                     color='black',
                     markersize=15)
        else:
            plt.figure(figsize=(10, 10))
            plt.plot(np.real(c), np.imag(c), '.')
            plt.plot(np.real(c)[:10], np.imag(c)[:10], '.',
                     color='r')  #show first 10 data points in red
            plt.plot(np.real(cmax),
                     np.imag(cmax),
                     '*',
                     color='black',
                     markersize=15)

    if len(c_raw.shape) > 1:
        return np.array([
            np.real(np.array(data[0])),
            np.real(np.array(data_opt)),
            np.real(errs)
        ])
    else:
        return np.array(
            [np.real(np.array(data[0])),
             np.real(np.array(data_opt))])
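
A small sketch of how the reference point is picked above: for hypothetical averaged complex data, the point with the maximum summed mutual distance is selected with np.extract.

import numpy as np

c = np.array([1 + 1j, 2 + 1j, 0.5 + 0.2j, 3 + 2j])  # hypothetical data

# summed mutual distances for every point
s = np.array([np.sum(np.abs(c - ci)) for ci in c])

# the point(s) where s is maximal serve as the reference
cmax = np.extract(s == s.max(), c)
data_opt = np.abs(c - cmax[0])  # distances from the reference point
print(cmax, data_opt)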
示例#37
0
# In[3]:

start = pd.to_datetime('2019-04-01', format='%Y-%m-%d')
end = pd.to_datetime('2019-07-01', format='%Y-%m-%d')

print(f'start: {start}')
print(f'end: {end}')

# In[4]:

s_full = pd.array(df.iloc[:, 0])
t_full = pd.array(pd.DatetimeIndex(df.iloc[:, 1]).astype(
    np.int64)) / 1000000000

t_full = np.extract([s_full == 2], t_full)

dt = t_full[1] - t_full[0]
print(f'data sampling is {dt:.2f} secs')

# In[5]:

t_start = pd.DatetimeIndex([start]).astype(np.int64) / 1000000000
t_end = pd.DatetimeIndex([end]).astype(np.int64) / 1000000000

t = np.extract([(t_full >= t_start[0]) & (t_full <= t_end[0])], t_full)

t = (t - t[0]) / 60 / 60 / 24

y = np.extract([(t_full >= t_start[0]) & (t_full <= t_end[0])],
               df.iloc[:, 2]).astype(np.int64)
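
A self-contained sketch of the same time-window filtering, using numpy datetime64 instead of pandas so it runs on its own (hypothetical daily samples):

import numpy as np

days = np.arange("2019-03-01", "2019-08-01", dtype="datetime64[D]")
t_full = days.astype("datetime64[s]").astype(np.int64).astype(float)

t_start = np.datetime64("2019-04-01", "s").astype(np.int64)
t_end = np.datetime64("2019-07-01", "s").astype(np.int64)

# keep only the samples inside the window; np.extract flattens the result
t = np.extract((t_full >= t_start) & (t_full <= t_end), t_full)
t_days = (t - t[0]) / 60 / 60 / 24  # elapsed days since the window start
print(len(t_days), t_days[:3])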
示例#38
0
    def get_mean_and_std(self, current_image):
        mask = self.get_mask(current_image)
        mean = np.mean(np.extract(mask, current_image))
        std = np.std(np.extract(mask, current_image))
        return mean, std
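
The same masked-statistics idiom in a self-contained form (hypothetical image and mask):

import numpy as np

image = np.arange(25, dtype=float).reshape(5, 5)
mask = image > 10

# np.extract returns only the masked pixels (flattened), so the statistics
# ignore everything outside the mask
mean = np.mean(np.extract(mask, image))
std = np.std(np.extract(mask, image))
print(mean, std)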
示例#39
0
def extract(cond, x):
    if isinstance(x, numbers.Number):
        return x
    else:
        return np.extract(cond, x)
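
A hypothetical usage of this scalar-safe wrapper: arrays are filtered by the condition, while plain numbers pass through unchanged.

import numpy as np

cond = np.array([True, False, True])
print(extract(cond, np.array([1.0, 2.0, 3.0])))  # array([1., 3.])
print(extract(cond, 5.0))                        # 5.0 (scalar passes through)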
示例#40
0
File: ppl.py  Project: mbbrodie/stylegan2
def lerp(a, b, t):
    return a + (b - a) * t


if __name__ == '__main__':
    # argparse setup defining `args` and `device` is omitted in this excerpt
    latent_dim = 512

    ckpt = torch.load(args.ckpt)

    g = Generator(args.size, latent_dim, 8).to(device)
    g.load_state_dict(ckpt['g_ema'])
    g.eval()

    percept = lpips.PerceptualLoss(model='net-lin',
                                   net='vgg',
                                   use_gpu=device.startswith('cuda'))

    distances = []

    n_batch = args.n_sample // args.batch
    resid = args.n_sample - (n_batch * args.batch)
    batch_sizes = [args.batch] * n_batch + [resid]

    with torch.no_grad():
        for batch in tqdm(batch_sizes):
            noise = g.make_noise()

            inputs = torch.randn([batch * 2, latent_dim], device=device)
            lerp_t = torch.rand(batch, device=device)

            if args.space == 'w':
                latent = g.get_latent(inputs)
                latent_t0, latent_t1 = latent[::2], latent[1::2]
                latent_e0 = lerp(latent_t0, latent_t1, lerp_t[:, None])
                latent_e1 = lerp(latent_t0, latent_t1,
                                 lerp_t[:, None] + args.eps)
                latent_e = torch.stack([latent_e0, latent_e1],
                                       1).view(*latent.shape)

            image, _ = g([latent_e], input_is_latent=True, noise=noise)

            if args.crop:
                c = image.shape[2] // 8
                image = image[:, :, c * 3:c * 7, c * 2:c * 6]

            factor = image.shape[2] // 256

            if factor > 1:
                image = F.interpolate(image,
                                      size=(256, 256),
                                      mode='bilinear',
                                      align_corners=False)

            dist = percept(image[::2], image[1::2]).view(
                image.shape[0] // 2) / (args.eps**2)
            distances.append(dist.to('cpu').numpy())

    distances = np.concatenate(distances, 0)

    lo = np.percentile(distances, 1, interpolation='lower')
    hi = np.percentile(distances, 99, interpolation='higher')
    filtered_dist = np.extract(
        np.logical_and(lo <= distances, distances <= hi), distances)

    print('ppl:', filtered_dist.mean())
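
The percentile clipping at the end reduces to this self-contained sketch (hypothetical distances; default percentile interpolation is used here):

import numpy as np

distances = np.random.default_rng(0).lognormal(size=1000)
lo = np.percentile(distances, 1)
hi = np.percentile(distances, 99)

# keep only values between the 1st and 99th percentile before averaging
filtered = np.extract(np.logical_and(lo <= distances, distances <= hi), distances)
print(filtered.mean(), len(filtered))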
示例#41
0
    def plot( self, name='phase-plot-name', time_unit='s', legend=None, 
            nrows=2, ncols=2, dpi=200):

        num_var = len(self.__df.columns)
        if num_var == 0:
            return

        today = datetime.datetime.today().strftime("%d%b%y %H:%M:%S")

        lead_name = name

        fig_num = None

        # Loop over variables and assign to the dashboards
        i_dash = 0
        for i_var in range(num_var):
            # if multiple of nrows*ncols start new dashboard
            if i_var % (nrows*ncols) == 0:

                if i_var != 0:  # flush any current figure
                    fig_name = lead_name+'-'+self.name+'-phase-plot-' + \
                            str(i_dash).zfill(2)
                    fig.savefig(fig_name+'.png', dpi=dpi, format='png')
                    plt.close(fig_num)

                    #pickle.dump( fig, open(fig_name+'.pickle','wb') )

                    i_dash += 1

                fig_num = str(np.random.random()) + '.' + str(i_dash)
                fig = plt.figure(num=fig_num)

                gs = gridspec.GridSpec(nrows, ncols)
#                gs.update(left=0.08, right=0.98, wspace=0.4, hspace=0.4)
                gs.update(left=0.11, right=0.98, wspace=0.4, hspace=0.5)

                axlst = list()

                nPlotsNeeded = num_var - i_var
                count = 0
                for i in range(nrows):
                    for j in range(ncols):
                        axlst.append(fig.add_subplot(gs[i, j]))
                        count += 1
                        if count == nPlotsNeeded:
                            break
                    if count == nPlotsNeeded:
                        break

                axes = np.array(axlst)

                text = today + ': Cortix.Phase.Plot'
                fig.text(.5, .95, text, horizontalalignment='center', fontsize=14)

                axs = axes.flat

                axId = 0

            # end of: if i_var % nrows*ncols == 0: # if a multiple of nrows*ncols
            # start a new dashboard

            ax = axs[axId]
            axId += 1

            col_name = self.__df.columns[i_var]

            species = self.get_species(col_name)
            if species:
                varName = species.formula_name
            else:
                quant = self.get_quantity(col_name)
                varName = quant.formal_name

            # sanity check
            if i_var <= len(self.__species):
                assert self.__species[i_var].name == self.__df.columns[i_var]
            else:
                assert self.__quantities[i_var].name == self.__df.columns[i_var]

            varUnit = 'g/L'

            '''
            if varUnit == 'gram':
                varUnit = 'g'
            if varUnit == 'gram/min':
                varUnit = 'g/min'
            if varUnit == 'gram/s':
                varUnit = 'g/s'
            if varUnit == 'gram/m3':
                varUnit = 'g/m3'
            if varUnit == 'gram/L':
                varUnit = 'g/L'
            if varUnit == 'sec':
                varUnit = 's'
            '''

            varLegend = legend
            varScale  = 'linear-linear'

            assert varScale == 'log' or varScale == 'linear' or varScale == 'log-linear' \
                or varScale == 'linear-log' or varScale == 'linear-linear' or \
                varScale == 'log-log'

            time_unit = 's'

            if time_unit == 'minute':
                time_unit = 'min'

            x = np.array( [i for i in self.__df.index] )

            if (varScale == 'linear' or varScale == 'linear-linear' or \
                varScale == 'linear-log') and x.max() >= 60.0:
                x /= 60.0
                if time_unit == 'min':
                    time_unit = 'h'
                if time_unit == 'second' or time_unit=='s':
                    time_unit = 'min'

            y = np.array( self.__df[col_name] )  # convert to numpy ndarray

            '''
            if (y.max() >= 1e3 or y.min() <= -1e3) and varScale != 'linear-log' and \
                    varScale != 'log-log' and varScale != 'log':
                y /= 1e3
                if varUnit == 'gram' or varUnit == 'g':
                    varUnit = 'kg'
                if varUnit == 'L':
                    varUnit = 'kL'
                if varUnit == 'cc':
                    varUnit = 'L'
                if varUnit == 'Ci':
                    varUnit = 'kCi'
                if varUnit == 'W':
                    varUnit = 'kW'
                if varUnit == 'gram/min' or varUnit == 'g/min':
                    varUnit = 'kg/min'
                if varUnit == 'gram/s' or varUnit == 'g/s':
                    varUnit = 'kg/s'
                if varUnit == 'gram/m3' or varUnit == 'g/m3':
                    varUnit = 'kg/m3'
                if varUnit == 'gram/L' or varUnit == 'g/L':
                    varUnit = 'kg/L'
                if varUnit == 'W/L':
                    varUnit = 'kW/L'
                if varUnit == 'Ci/L':
                    varUnit = 'kCi/L'
                if varUnit == '':
                    varUnit = 'x1e3'
                if varUnit == 'L/min':
                    varUnit = 'kL/min'
                if varUnit == 'Pa':
                    varUnit = 'kPa'
                if varUnit == 's':
                    varUnit = 'ks'
                if varUnit == 'm':
                    varUnit = 'km'
                if varUnit == 'm/s':
                    varUnit = 'km/s'

            if (y.max() < 1e-6 and y.min() > -1e-6) and varScale != 'linear-log' and \
                    varScale != 'log-log' and varScale != 'log':
                y *= 1e9
                if varUnit == 'gram' or varUnit == 'g':
                    varUnit = 'ng'
                if varUnit == 'cc':
                    varUnit = 'n-cc'
                if varUnit == 'L':
                    varUnit = 'nL'
                if varUnit == 'W':
                    varUnit = 'nW'
                if varUnit == 'Ci':
                    varUnit = 'nCi'
                if varUnit == 'gram/min' or varUnit == 'g/min':
                    varUnit = 'ng/min'
                if varUnit == 'gram/s' or varUnit == 'g/s':
                    varUnit = 'ng/s'
                if varUnit == 'gram/m3' or varUnit == 'g/m3':
                    varUnit = 'ng/m3'
                if varUnit == 'gram/L' or varUnit == 'g/L':
                    varUnit = 'ng/L'
                if varUnit == 'W/L':
                    varUnit = 'nW/L'
                if varUnit == 'Ci/L':
                    varUnit = 'nCi/L'
                if varUnit == 'L/min':
                    varUnit = 'nL/min'
                if varUnit == 'Pa':
                    varUnit = 'nPa'
                if varUnit == 's':
                    varUnit = 'ns'
                if varUnit == 'm/s':
                    varUnit = 'nm/s'

            if (y.max() >= 1e-6 and y.max()  <  1e-3) or \
               (y.min() > -1e-3 and y.min() <= -1e-6) and varScale != 'linear-log' and \
                    varScale != 'log-log' and varScale != 'log':
                y *= 1e6
                if varUnit == 'gram' or varUnit == 'g':
                    varUnit = 'ug'
                if varUnit == 'cc':
                    varUnit = 'u-cc'
                if varUnit == 'L':
                    varUnit = 'uL'
                if varUnit == 'W':
                    varUnit = 'uW'
                if varUnit == 'Ci':
                    varUnit = 'uCi'
                if varUnit == 'gram/min' or varUnit == 'g/min':
                    varUnit = 'ug/min'
                if varUnit == 'gram/s' or varUnit == 'g/s':
                    varUnit = 'ug/s'
                if varUnit == 'gram/m3' or varUnit == 'g/m3':
                    varUnit = 'ug/m3'
                if varUnit == 'gram/L' or varUnit == 'g/L':
                    varUnit = 'ug/L'
                if varUnit == 'W/L':
                    varUnit = 'uW/L'
                if varUnit == 'Ci/L':
                    varUnit = 'uCi/L'
                if varUnit == 'L/min':
                    varUnit = 'uL/min'
                if varUnit == 'Pa':
                    varUnit = 'uPa'
                if varUnit == 's':
                    varUnit = 'us'
                if varUnit == 'm/s':
                    varUnit = 'um/s'

            if (y.max() >= 1e-3 and y.max()  < 1e-1) or \
               (y.min() <= -1e-3 and y.min() > -1e-1) and varScale != 'linear-log' and \
                    varScale != 'log-log' and varScale != 'log':
                y *= 1e3
                if varUnit == 'gram' or varUnit == 'g':
                    varUnit = 'mg'
                if varUnit == 'cc':
                    varUnit = 'm-cc'
                if varUnit == 'L':
                    varUnit = 'mL'
                if varUnit == 'W':
                    varUnit = 'mW'
                if varUnit == 'Ci':
                    varUnit = 'mCi'
                if varUnit == 'gram/min' or varUnit == 'g/min':
                    varUnit = 'mg/min'
                if varUnit == 'gram/s' or varUnit == 'g/s':
                    varUnit = 'mg/s'
                if varUnit == 'gram/m3' or varUnit == 'g/m3':
                    varUnit = 'mg/m3'
                if varUnit == 'gram/L' or varUnit == 'g/L':
                    varUnit = 'mg/L'
                if varUnit == 'W/L':
                    varUnit = 'mW/L'
                if varUnit == 'Ci/L':
                    varUnit = 'mCi/L'
                if varUnit == 'L/min':
                    varUnit = 'mL/min'
                if varUnit == 'Pa':
                    varUnit = 'mPa'
                if varUnit == 's':
                    varUnit = 'ms'
                if varUnit == 'm/s':
                    varUnit = 'mm/s'
            '''

            ax.set_xlabel('Time [' + time_unit + ']', fontsize=9)
            ax.set_ylabel(varName + ' [' + varUnit + ']', fontsize=9)

            '''
            ymax = y.max()
            dy = ymax * .1
            ymax += dy
            ymin = y.min()
            ymin -= dy

            if abs(ymin - ymax) <= 1.e-4:
                ymin = -1.0
                ymax = 1.0

            ax.set_ylim(ymin, ymax)
            '''

            if ncols >= 4:
                for l in ax.get_xticklabels():
                    l.set_fontsize(8)
            else:
                for l in ax.get_xticklabels():
                    l.set_fontsize(10)
            for l in ax.get_yticklabels():
                l.set_fontsize(10)

            if time_unit == 'h' and x.max() - x.min() <= 5.0:
                majorLocator = MultipleLocator(1.0)
                minorLocator = MultipleLocator(0.5)

                ax.xaxis.set_major_locator(majorLocator)
                ax.xaxis.set_minor_locator(minorLocator)

            if varScale == 'log' or varScale == 'log-log':
                ax.set_xscale('log')
                ax.set_yscale('log')
                positiveX = x > 0.0
                x = np.extract(positiveX, x)
                y = np.extract(positiveX, y)
                positiveY = y > 0.0
                x = np.extract(positiveY, x)
                y = np.extract(positiveY, y)
                if y.size > 0:
                    if y.min() > 0.0 and y.max() > y.min():
                        ymax = y.max()
                        dy = ymax * .1
                        ymax += dy
                        ymin = y.min()
                        ymin -= dy
                        if ymin < 0.0 or ymin > ymax / 1000.0:
                            ymin = ymax / 1000.0
                        ax.set_ylim(ymin, ymax)
                    else:
                        ax.set_ylim(1.0, 10.0)
                else:
                    ax.set_ylim(1.0, 10.0)

            if varScale == 'log-linear':
                ax.set_xscale('log')
                positiveX = x > 0.0 # True if > 0.0
                x = np.extract(positiveX, x)
                y = np.extract(positiveX, y)

            if varScale == 'linear-log':
                ax.set_yscale('log')
                positiveY = y > 0.0 # True if > 0.0
                x = np.extract(positiveY, x)
                y = np.extract(positiveY, y)
                #assert x.size == y.size, 'size error; stop.'
                if y.size > 0:
                    if y.min() > 0.0 and y.max() > y.min():
                        ymax = y.max()
                        dy = ymax * .1
                        ymax += dy
                        ymin = y.min()
                        ymin -= dy
                        if ymin < 0.0 or ymin > ymax / 1000.0:
                            ymin = ymax / 1000.0
                        ax.set_ylim(y.min(), ymax)
                    else:
                        ax.set_ylim(1.0, 10.0)
                else:
                    ax.set_ylim(1.0, 10.0)

            # ...................
            # make the plot here

            yformatter = ScalarFormatter(useMathText=True,useOffset=True)
            yformatter.set_powerlimits((15, 5))
            ax.yaxis.set_major_formatter(yformatter)

            if varLegend:

                ax.plot(x, y, 's-', color='black', linewidth=0.5, markersize=2,
                        markeredgecolor='black', label=varLegend)
            else:

                ax.plot(x, y, 's-', color='black', linewidth=0.5, markersize=2,
                        markeredgecolor='black')

            # ...................

            if species and species.info:
                ax.set_title(species.info, fontsize=8)
            if varLegend:
                ax.legend(loc='best', prop={'size': 7})
            ax.grid()

        # end of: for i_var in range(num_var):

        fig_name = name+'-'+self.name+'-phase-plot-' + str(i_dash).zfill(2)
        fig.savefig(fig_name+'.png', dpi=dpi, format='png')
        plt.close(fig_num)

        #pickle.dump( fig, open(fig_name+'.pickle','wb') )

        return
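
The log-axis filtering above boils down to this sketch: drop the points a log scale cannot show before plotting (hypothetical curve):

import numpy as np

x = np.linspace(-1.0, 5.0, 13)
y = x ** 3

# keep only the points where both coordinates are strictly positive
positive = (x > 0.0) & (y > 0.0)
x_log = np.extract(positive, x)
y_log = np.extract(positive, y)
print(x_log, y_log)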
示例#42
0
gdal.RasterizeLayer(maskRaster, [1], vectorLayer, burn_values=[1])

maskPX = gdarr.DatasetReadAsArray(maskRaster, 0, 0, x, y)
tempPx13 = gdarr.DatasetReadAsArray(tempDataset13, 0, 0, x, y)
tempPx17 = gdarr.DatasetReadAsArray(tempDataset17, 0, 0, x, y)

# do the multiplication:
# membership value* mask = either store as 0 or store the membership value.
# output is an array

pxResult13 = numpy.multiply(maskPX, tempPx13)
pxResult17 = numpy.multiply(maskPX, tempPx17)
print("hey!")

# remove zeros from the array; np.extract returns a flattened one-dimensional array
pxResult13 = numpy.extract(pxResult13 > 0, pxResult13)
pxResult17 = numpy.extract(pxResult17 > 0, pxResult17)

# step6. plot and add labels and legends
plt.xlabel("pixel number")
plt.ylabel("membership value")
plt.grid()
plt.plot(pxResult13, ":", color='red', label="2013")
plt.plot(pxResult17, ":", color='green', label="2017")
plt.title("Plot Of Membership Value Along The Transect Line In 2013 And 2017")
plt.legend(loc="lower left")
plt.show()

# step7. flush the cache and clean memory
layer = None
maskBand.FlushCache()
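
A numpy-only sketch of the mask-and-extract step above (hypothetical membership and mask arrays; the GDAL reads are omitted):

import numpy as np

membership = np.array([[0.2, 0.8, 0.0],
                       [0.5, 0.0, 0.9]])
mask = np.array([[1, 1, 0],
                 [0, 0, 1]])

# zero out everything outside the mask, then drop the zeros;
# np.extract returns a flat 1-D array ready for plotting
masked = np.multiply(mask, membership)
profile = np.extract(masked > 0, masked)
print(profile)  # [0.2 0.8 0.9]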
示例#43
0
    def _get_mean_and_std(self, name, currency, league, sale_time):
        """
        For a given currency sale, get the weighted mean and standard deviation.

        Full returned value list is:

        * mean
        * standard deviation
        * total of all weights used
        * count of considered rows

        This used to be done in the DB, but doing math in the database is
        a pain, and not very portable. Numpy lets us be pretty efficient,
        so we're not losing all that much.
        """
        def calc_mean_std(values, weights):
            mean = numpy.average(values, weights=weights)
            variance = numpy.average((values - mean)**2, weights=weights)
            stddev = math.sqrt(variance)

            return (mean, stddev)

        now = int(time.time())

        # This may be DB-specific. Eventually getting it into a
        # pure-SQLAlchemy form would be good...
        query = self.db.session.query(poefixer.Sale)
        query = query.join(poefixer.Item,
                           poefixer.Sale.item_id == poefixer.Item.id)
        query = query.filter(poefixer.Sale.name == name)
        query = query.filter(poefixer.Item.league == league)
        query = query.filter(poefixer.Sale.sale_currency == currency)
        # Items older than a month are really not worth anything in terms
        # establishing the behavior of the economy. Even rare items like
        # mirrors move fast enough for a month to be sufficient.
        query = query.filter(
            poefixer.Sale.item_updated_at > (now - self.relevant))
        query = query.add_columns(poefixer.Sale.sale_amount,
                                  poefixer.Sale.item_updated_at)

        values = numpy.array([
            (row.sale_amount,
             self.weight_increment / max(1, sale_time - row.item_updated_at))
            for row in query.all()
        ])
        if len(values) == 0:
            return (None, None, None, None)
        prices = values[:, 0]
        weights = values[:, 1]
        mean, stddev = calc_mean_std(prices, weights)
        count = len(prices)
        total_weight = weights.sum()

        if count > 3 and stddev > mean / 2:
            self.logger.debug(
                "%s->%s: Large stddev=%s vs mean=%s, recalibrating", name,
                currency, stddev, mean)
            # Throw out values outside of 2 stddev and try again
            prices_ok = numpy.absolute(prices - mean) <= stddev * 2
            prices = numpy.extract(prices_ok, prices)
            weights = numpy.extract(prices_ok, weights)
            mean, stddev = calc_mean_std(prices, weights)
            count2 = len(prices)
            total_weight = weights.sum()
            self.logger.debug(
                "Recalibration ignored %s rows, final stddev=%s, mean=%s",
                count - count2, stddev, mean)
            count = count2

        return (float(mean), float(stddev), float(total_weight), count)
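
The recalibration step reduces to this sketch: compute a weighted mean and standard deviation, discard points farther than two standard deviations with np.extract, and recompute (hypothetical prices and weights):

import numpy as np

prices = np.array([10.0, 11.0, 9.5, 10.5, 100.0])  # one obvious outlier
weights = np.array([1.0, 1.0, 1.0, 1.0, 0.5])

mean = np.average(prices, weights=weights)
stddev = np.sqrt(np.average((prices - mean) ** 2, weights=weights))

# throw out values outside of 2 stddev and try again
prices_ok = np.absolute(prices - mean) <= stddev * 2
prices2 = np.extract(prices_ok, prices)
weights2 = np.extract(prices_ok, weights)
print(mean, np.average(prices2, weights=weights2))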
示例#44
0
def cluster(path):

    timer7 = []
    start7 = timer()

    #fileSize = os.path.getsize(path)/1024000
    df = DF(path)  # Creates dataframe from the .dat file

    X_db = np.array(
        df.drop(columns=['Width', 'S/N'])
    )  # Drops width and S/N data for DBScan to run on the DM - Time space
    X = np.array(df)

    # Sorts the data points by DM
    points_db = X_db[X_db[:, 0].argsort()]
    points = X[X[:, 0].argsort()]

    # Lower DM limit below which the DBScan is not run, as it is most likely not extragalactic
    # Speeds up the DBScan runtime
    dm_lim = 0.03 * max(points_db[:, 0])
    points_new = np.array(points_db[points_db[:, 0] > dm_lim])

    X_scaled = scale(1077.4, 50.32576).transform(
        points_new
    )  # Rescales the data so that the x- and y-axes get ratio 1:1
    X_scaled[:, 1] = 3 * X_scaled[:, 1]

    clusters = DBSCAN(eps=xeps, min_samples=xmin).fit_predict(
        X_scaled
    )  # Clustering algorithm, returns array with cluster labels for each point

    # Re-inserts bottom points with labels -1 for RFI
    length = len(points) - len(clusters)
    clusters = np.insert(clusters, 0, np.full(length, -1))

    # Adds column to the points arrays for cluster label
    newArr = np.column_stack((points, clusters[np.newaxis].T))

    # Re-order
    newArr[:, -1] = clusterOrder(newArr[:, -1])

    # Noise condition for Nevents<Nmin => noise
    N_min = 20  # Tuneable number (set to match Karako)
    labels = np.unique(newArr[:, -1])
    for q in range(1, len(labels)):
        label = labels[q]
        labSlice = np.extract(newArr[:, -1] == label, newArr[:, -1])
        if (
                len(labSlice) < N_min
        ):  # Gives points of clusters with fewer than N_min events the RFI label of -1
            newArr[:, -1] = np.where(newArr[:, -1] == label, -1, newArr[:, -1])

    # Re-order
    newArr[:, -1] = clusterOrder(newArr[:, -1])

    # Break
    dm_lim = 40  # Increased DM-limit to check for clusters with 'fraction' of their points below this limit
    fraction = 0.05  # Size of the fraction of points allowed in below 'dm_lim'

    # Condition that sets all clusters with 'fraction' of its points below dm_lim to also be classified as RFI
    labels = np.unique(newArr[:, -1])
    for q in range(1, len(labels)):
        label = labels[q]
        labSlice = np.extract(newArr[:, -1] == label, newArr[:, -1])
        num_temp = int(
            round(fraction * len(labSlice), 0)
        )  # Calculates how many points of a label a certain fraction corresponds to and rounds to nearest integer
        temp = sort(
            newArr[newArr[:, -1] == label][:, 0],
            num_temp)  # Returns the 'num_temp' lowest dms in labels_arr[q]
        if (
            (len(temp) > 0) and (max(temp) < dm_lim)
        ):  # If the highest number in temp is below dm_lim then so is the rest in 'temp'
            newArr[:, -1] = np.where(newArr[:, -1] == label, -1, newArr[:, -1])

    # Condition that sets all points with dm below dm_lim to be classified as RFI
    newArr[:, -1][newArr[:, 0] < dm_lim] = -1

    # Re-order
    newArr[:, -1] = clusterOrder(newArr[:, -1])

    # Burst duration condition
    labels = np.unique(newArr[:, -1])
    for q in range(1, len(labels)):
        label = labels[q]
        upper = np.quantile(newArr[newArr[:, -1] == label][:, 1], 0.8)
        lower = np.quantile(newArr[newArr[:, -1] == label][:, 1], 0.2)
        if (
                upper - lower
        ) >= 1:  # If the time between the quantiles is longer than 1s set cluster to RFI
            newArr[:, -1] = np.where(newArr[:, -1] == label, -1, newArr[:, -1])

    # Re-order
    newArr[:, -1] = clusterOrder(newArr[:, -1])

    # Loops through all remaining clusters to exclude further clusters, calculate feature values, and classify them using Random Forest
    labels = np.unique(newArr[:, -1])

    end7 = timer()
    timer7.append(end7 - start7)
    #print("Clustering in module: ", np.mean(timer7))

    return newArr, labels
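
The small-cluster rejection used above, as a stand-alone sketch (hypothetical label column; -1 marks RFI/noise):

import numpy as np

labels_col = np.array([1, 1, 1, 2, 2, -1, 3, 3, 3, 3])
N_min = 3

# clusters with fewer than N_min members are relabelled as noise
for label in np.unique(labels_col):
    if label == -1:
        continue
    members = np.extract(labels_col == label, labels_col)
    if len(members) < N_min:
        labels_col = np.where(labels_col == label, -1, labels_col)
print(labels_col)  # cluster 2 (only 2 members) becomes -1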
示例#45
0
def count_R_Fb(mag):
    r, dens, densms, densps = np.loadtxt('density_{0}_{1}_{2}.txt'.format(name, mag, h), unpack=True, usecols = (0, 1, 3, 4))
    rmax = np.amax(r)
    densmax = int(np.amax(densps))+1
    density_spline = spline(r, dens)
    density_msigma_spline = spline(r, densms)
    density_psigma_spline = spline(r, densps)

    radius = np.arange(0, rmax+PRECISION, PRECISION)

    density = density_spline(radius)
    density_low = density_msigma_spline(radius)
    density_up = density_psigma_spline(radius)

    rect_deviation = np.zeros_like(radius)

    for i in range(len(radius)):
        rect = density[i]*(rmax-radius[i])
        integral = simps(density[i:], radius[i:])
        rect_deviation[i] = abs(rect-integral)
        
    plt.plot(radius, density, 'k', linewidth=1)
    plt.plot(radius, density_low, 'k', linestyle='dashed', linewidth=1)
    plt.plot(radius, density_up, 'k', linestyle='dashed', linewidth=1)

    for i in range(len(radius)):
        if rect_deviation[i] < 0.5:
            R, Fb = radius[i], density[i]
            dFb = max(density_up[i]-Fb, Fb-density_low[i])
            ibound = i
            break

    dR1, dR2, dR3, dR4 = 0, 0, 0, 0
    roots = (spline(radius, density-Fb, extrapolate=False)).roots()
    roots_l = (spline(radius, density_low-Fb, extrapolate=False)).roots()
    roots_u = (spline(radius, density_up-Fb, extrapolate=False)).roots()
    
    rl_left = np.extract(roots_l < R, roots_l)
    ru_left = np.extract(roots_u < R, roots_u)
    rl_right = np.extract(roots_l > R, roots_l)
    ru_right = np.extract(roots_u > R, roots_u)
    ind_r = (np.argwhere(roots == R))[0][0]
    if ind_r == roots.size-1:
        right_r = rmax - R
    else:
        right_r = roots[ind_r+1]-R
    
    if ind_r == 0:
        left_r = R
    else:
        left_r = R - roots[ind_r-1]
    
    if rl_left.size != 0:
        if np.amin(R-rl_left) < left_r:
            dR1 = np.amin(R-rl_left)

    if ru_left.size != 0:   
        if np.amin(R-ru_left) < left_r:
            dR2 = np.amin(R-ru_left)
    
    if rl_right.size != 0:   
        if np.amin(rl_right-R) < right_r:
            dR3 = np.amin(rl_right-R)

    if ru_right.size != 0:
        if np.amin(ru_right-R) < right_r:
            dR4 = np.amin(ru_right-R)

    dR = max(dR1, dR2, dR3, dR4)

    if dR > R:
        dR = R
    if dFb > Fb:
        dFb = Fb

    ax = plt.gca()
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(12)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(12)
    ax.axhline(Fb, c='b', alpha=0.8, lw=0.5, label = "Fb = {0:5.2f} ± {1:5.2f}".format(Fb, dFb))
    ax.axvline(R, c='r', alpha=0.8, lw=0.5, label = "R = {0:5.2f} ± {1:5.2f}".format(R, dR))
    ax.tick_params(width=2)
    ax.grid(alpha=0.2, linestyle='dashed', linewidth=0.5)
    ax.set_xlim(0, rmax)
    ax.set_ylim(0, densmax)
    ax.legend()
    plt.title("Радиальный профиль плотности {0},\nh={1}', maglim={2}m".format(name, h, mag), fontsize=18)
    ax.set_xlabel(r'Угловое расстояние от центра, $arcmin$', fontsize=16)
    ax.set_ylabel(r'Радиальная плотность, $arcmin^{-2}$', fontsize=16)
    plt.savefig('density_{0}_{1}_{2}_RFb.png'.format(name, mag, h), dpi=300)
    plt.close()
    return (R, dR, Fb, dFb)
示例#46
0
def glrm_orthonnmf():
    m = 1000
    n = 100
    k = 10

    print "Uploading random uniform matrix with rows = " + str(
        m) + " and cols = " + str(n)
    Y = np.random.rand(k, n)
    X = np.random.rand(m, k)
    train = np.dot(X, Y)
    train_h2o = h2o.H2OFrame(train.tolist())

    print "Run GLRM with orthogonal non-negative regularization on X, non-negative regularization on Y"
    initial_y = np.random.rand(k, n)
    initial_y_h2o = h2o.H2OFrame(initial_y.tolist())
    glrm_h2o = h2o.glrm(x=train_h2o,
                        k=k,
                        init="User",
                        user_y=initial_y_h2o,
                        loss="Quadratic",
                        regularization_x="OneSparse",
                        regularization_y="NonNegative",
                        gamma_x=1,
                        gamma_y=1)
    glrm_h2o.show()

    print "Check that X and Y matrices are non-negative"
    fit_y = glrm_h2o._model_json['output']['archetypes'].cell_values
    fit_y_np = [[float(s) for s in list(row)[1:]] for row in fit_y]
    fit_y_np = np.array(fit_y_np)
    fit_x = h2o.get_frame(
        glrm_h2o._model_json['output']['loading_key']['name'])
    fit_x_np = np.array(h2o.as_list(fit_x))
    assert np.all(fit_y_np >= 0), "Y must contain only non-negative elements"
    assert np.all(fit_x_np >= 0), "X must contain only non-negative elements"

    print "Check that columns of X are orthogonal"
    xtx = np.dot(np.transpose(fit_x_np), fit_x_np)
    offdiag = np.extract(1 - np.eye(k), xtx)
    assert np.all(
        offdiag == 0), "All off diagonal elements of X'X must equal zero"

    print "Check final objective function value"
    fit_xy = np.dot(fit_x_np, fit_y_np)
    glrm_obj = glrm_h2o._model_json['output']['objective']
    sse = np.sum(np.square(train.__sub__(fit_xy)))
    assert abs(glrm_obj - sse) < 1e-6, "Final objective was " + str(
        glrm_obj) + " but should equal " + str(sse)

    print "Impute XY and check error metrics"
    pred_h2o = glrm_h2o.predict(train_h2o)
    pred_np = np.array(h2o.as_list(pred_h2o))
    assert np.allclose(
        pred_np, fit_xy
    ), "Imputation for numerics with quadratic loss should equal XY product"
    glrm_numerr = glrm_h2o._model_json['output'][
        'training_metrics']._metric_json['numerr']
    glrm_caterr = glrm_h2o._model_json['output'][
        'training_metrics']._metric_json['caterr']
    assert abs(glrm_numerr - glrm_obj) < 1e-3, "Numeric error was " + str(
        glrm_numerr) + " but should equal final objective " + str(glrm_obj)
    assert glrm_caterr == 0, "Categorical error was " + str(
        glrm_caterr) + " but should be zero"

    print "Run GLRM with orthogonal non-negative regularization on both X and Y"
    initial_y = np.random.rand(k, n)
    initial_y_h2o = h2o.H2OFrame(initial_y.tolist())
    glrm_h2o = h2o.glrm(x=train_h2o,
                        k=k,
                        init="User",
                        user_y=initial_y_h2o,
                        loss="Quadratic",
                        regularization_x="OneSparse",
                        regularization_y="OneSparse",
                        gamma_x=1,
                        gamma_y=1)
    glrm_h2o.show()

    print "Check that X and Y matrices are non-negative"
    fit_y = glrm_h2o._model_json['output']['archetypes'].cell_values
    fit_y_np = [[float(s) for s in list(row)[1:]] for row in fit_y]
    fit_y_np = np.array(fit_y_np)
    fit_x = h2o.get_frame(
        glrm_h2o._model_json['output']['loading_key']['name'])
    fit_x_np = np.array(h2o.as_list(fit_x))
    assert np.all(fit_y_np >= 0), "Y must contain only non-negative elements"
    assert np.all(fit_x_np >= 0), "X must contain only non-negative elements"

    print "Check that columns of X are orthogonal"
    xtx = np.dot(np.transpose(fit_x_np), fit_x_np)
    offdiag_x = np.extract(1 - np.eye(k), xtx)
    assert np.all(
        offdiag_x == 0), "All off diagonal elements of X'X must equal zero"

    print "Check that rows of Y are orthogonal"
    yyt = np.dot(fit_y_np, np.transpose(fit_y_np))
    offdiag_y = np.extract(1 - np.eye(k), yyt)
    assert np.all(
        offdiag_y == 0), "All off diagonal elements of YY' must equal zero"

    print "Check final objective function value"
    fit_xy = np.dot(fit_x_np, fit_y_np)
    glrm_obj = glrm_h2o._model_json['output']['objective']
    sse = np.sum(np.square(train.__sub__(fit_xy)))
    assert abs(glrm_obj - sse) < 1e-6, "Final objective was " + str(
        glrm_obj) + " but should equal " + str(sse)

    print "Impute XY and check error metrics"
    pred_h2o = glrm_h2o.predict(train_h2o)
    pred_np = np.array(h2o.as_list(pred_h2o))
    assert np.allclose(
        pred_np, fit_xy
    ), "Imputation for numerics with quadratic loss should equal XY product"
    glrm_numerr = glrm_h2o._model_json['output'][
        'training_metrics']._metric_json['numerr']
    glrm_caterr = glrm_h2o._model_json['output'][
        'training_metrics']._metric_json['caterr']
    assert abs(glrm_numerr - glrm_obj) < 1e-3, "Numeric error was " + str(
        glrm_numerr) + " but should equal final objective " + str(glrm_obj)
    assert glrm_caterr == 0, "Categorical error was " + str(
        glrm_caterr) + " but should be zero"
示例#47
0
def WENO(thing):
    """
    I have to split this up: it has to return a value depending on whether
    the original value was positive or negative. It is a small waste, but I
    can just calculate the entire thing both the positive way and the
    negative way, then select out the values that were originally positive
    or negative and put them into the final array.
    """
    # determine the indices where positive
    positive_indices = []
    for i in range(len(thing)):
        if thing[i] >= 0:
            positive_indices.append(i)
    # determine the indices where negative
    negative_indices = []
    for i in range(len(thing)):
        if thing[i] < 0:
            negative_indices.append(i)
    # calculate positive

    um2 = roll(thing, 2)
    um1 = roll(thing, 1)
    up1 = roll(thing, -1)
    up2 = roll(thing, -2)

    u0 = (1. / 3.) * um2 - (7. / 6.) * um1 + (11. / 6.) * thing
    u1 = -(1. / 6.) * um1 + (5. / 6.) * thing + (1. / 3.) * up1
    u2 = (1. / 3.) * thing + (5. / 6.) * up1 - (1. / 6.) * up2

    # nonlinear weights

    epsilon = 10**-6

    beta0 = (13. / 12.) * (um2 - 2 * um1 + thing)**2 + .25 * (um2 - 4 * um1 +
                                                              3 * thing)**2
    beta1 = (13. / 12.) * (um1 - 2 * thing + up1)**2 + .25 * (um1 - up1)**2
    beta2 = (13. / 12.) * (thing - 2 * up1 + up2)**2 + .25 * (3 * thing -
                                                              4 * up1 + up2)**2

    beta = [beta0, beta1, beta2]
    gamma = [.1, .6, .3]

    omegatilde = [0, 0, 0]
    for i in range(3):
        omegatilde[i] = gamma[i] / (epsilon + beta[i])**2

    omega = [0, 0, 0]
    for i in range(3):
        omega[i] = omegatilde[i] / sum(omegatilde)

    u_hlf = omega[0] * u0 + omega[1] * u1 + omega[2] * u2

    flux = (u_hlf)**2
    duudx = -(flux - roll(flux, 1)) / dx

    # calculate negative
    um1 = np.roll(thing, 1)
    up1 = np.roll(thing, -1)
    up2 = np.roll(thing, -2)
    up3 = np.roll(thing, -3)

    u0 = (1. / 3.) * up3 - (7. / 6.) * up2 + (11. / 6.) * up1
    u1 = -(1. / 6.) * up2 + (5. / 6.) * up1 + (1. / 3.) * thing
    u2 = (1. / 3.) * up1 + (5. / 6.) * thing - (1. / 6.) * um1

    # nonlinear weights

    epsilon = 10**-6

    beta0 = (13. / 12.) * (up3 - 2 * up2 + up1)**2 + .25 * (up3 - 4 * up2 +
                                                            3 * up1)**2
    beta1 = (13. / 12.) * (up2 - 2 * up1 + thing)**2 + .25 * (up2 - thing)**2
    beta2 = (13. / 12.) * (up1 - 2 * thing +
                           um1)**2 + .25 * (3 * up1 - 4 * thing + um1)**2

    beta = [beta0, beta1, beta2]
    gamma = [.1, .6, .3]
    omegatilde = [0, 0, 0]
    for i in range(3):
        omegatilde[i] = gamma[i] / (epsilon + beta[i])**2

    omega = [0, 0, 0]

    for i in range(3):
        omega[i] = omegatilde[i] / sum(omegatilde)

    u_hlfneg = omega[0] * u0 + omega[1] * u1 + omega[2] * u2
    fluxneg = (u_hlfneg)**2
    duudxneg = -(fluxneg - roll(fluxneg, 1)) / dx

    # extract values where was positive
    positive_values = np.extract(thing >= 0, duudx)
    # negative
    negative_values = np.extract(thing < 0, duudxneg)

    # combine into new list
    positive = np.zeros(nx)
    np.put(positive, positive_indices, positive_values)
    negative = np.zeros(nx)
    np.put(negative, negative_indices, negative_values)
    dudt = np.zeros(nx)
    np.put(dudt, positive_indices, positive_values)
    np.put(dudt, negative_indices, negative_values)
    return dudt
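
The select-and-recombine pattern at the end of WENO, shown on its own with hypothetical data: np.extract picks each branch where it applies and np.put scatters the values back into one array.

import numpy as np

u = np.array([0.5, -1.0, 2.0, -0.25, 0.0])
result_pos = u ** 2      # stand-in for the "positive" flux derivative
result_neg = -(u ** 2)   # stand-in for the "negative" flux derivative

pos_idx = np.nonzero(u >= 0)[0]
neg_idx = np.nonzero(u < 0)[0]

combined = np.zeros_like(u)
np.put(combined, pos_idx, np.extract(u >= 0, result_pos))
np.put(combined, neg_idx, np.extract(u < 0, result_neg))
print(combined)  # equivalent to np.where(u >= 0, result_pos, result_neg)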
示例#48
0
class Bfloat16Test(parameterized.TestCase):
    """Tests the non-numpy Python methods of the bfloat16 type."""
    def testRoundTripToFloat(self):
        for v in FLOAT_VALUES:
            np.testing.assert_equal(v, float(bfloat16(v)))

    def testRoundTripNumpyTypes(self):
        for dtype in [np.float16, np.float32, np.float64, np.longdouble]:
            np.testing.assert_equal(-3.75, dtype(bfloat16(dtype(-3.75))))
            np.testing.assert_equal(1.5, float(bfloat16(dtype(1.5))))
            np.testing.assert_equal(4.5, dtype(bfloat16(np.array(4.5, dtype))))
            np.testing.assert_equal(np.array([2, 5, -1], bfloat16),
                                    bfloat16(np.array([2, 5, -1], dtype)))

    def testRoundTripToInt(self):
        for v in [
                -256, -255, -34, -2, -1, 0, 1, 2, 10, 47, 128, 255, 256, 512
        ]:
            self.assertEqual(v, int(bfloat16(v)))

    # pylint: disable=g-complex-comprehension
    @parameterized.named_parameters(
        ({
            "testcase_name": "_" + dtype.__name__,
            "dtype": dtype
        } for dtype in
         [bfloat16, np.float16, np.float32, np.float64, np.longdouble]))
    def testRoundTripToNumpy(self, dtype):
        for v in FLOAT_VALUES:
            np.testing.assert_equal(v, bfloat16(dtype(v)))
            np.testing.assert_equal(v, dtype(bfloat16(dtype(v))))
            np.testing.assert_equal(v, dtype(bfloat16(np.array(v, dtype))))
        if dtype != bfloat16:
            np.testing.assert_equal(
                np.array(FLOAT_VALUES, dtype),
                bfloat16(np.array(FLOAT_VALUES, dtype)).astype(dtype))

    def testStr(self):
        self.assertEqual("0", str(bfloat16(0.0)))
        self.assertEqual("1", str(bfloat16(1.0)))
        self.assertEqual("-3.5", str(bfloat16(-3.5)))
        self.assertEqual("0.0078125", str(bfloat16(float.fromhex("1.0p-7"))))
        self.assertEqual("inf", str(bfloat16(float("inf"))))
        self.assertEqual("-inf", str(bfloat16(float("-inf"))))
        self.assertEqual("nan", str(bfloat16(float("nan"))))

    def testRepr(self):
        self.assertEqual("0", repr(bfloat16(0)))
        self.assertEqual("1", repr(bfloat16(1)))
        self.assertEqual("-3.5", repr(bfloat16(-3.5)))
        self.assertEqual("0.0078125", repr(bfloat16(float.fromhex("1.0p-7"))))
        self.assertEqual("inf", repr(bfloat16(float("inf"))))
        self.assertEqual("-inf", repr(bfloat16(float("-inf"))))
        self.assertEqual("nan", repr(bfloat16(float("nan"))))

    def testHashZero(self):
        """Tests that negative zero and zero hash to the same value."""
        self.assertEqual(hash(bfloat16(-0.0)), hash(bfloat16(0.0)))

    @parameterized.parameters(
        np.extract(np.isfinite(FLOAT_VALUES), FLOAT_VALUES))
    def testHashNumbers(self, value):
        self.assertEqual(hash(value), hash(bfloat16(value)), str(value))

    @parameterized.named_parameters(("PositiveNan", bfloat16(float("nan"))),
                                    ("NegativeNan", bfloat16(float("-nan"))))
    def testHashNan(self, nan):
        nan_hash = hash(nan)
        nan_object_hash = object.__hash__(nan)
        # The hash of a NaN is either 0 or a hash of the object pointer.
        self.assertIn(nan_hash, (sys.hash_info.nan, nan_object_hash), str(nan))

    def testHashInf(self):
        self.assertEqual(sys.hash_info.inf, hash(bfloat16(float("inf"))),
                         "inf")
        self.assertEqual(-sys.hash_info.inf, hash(bfloat16(float("-inf"))),
                         "-inf")

    # Tests for Python operations
    def testNegate(self):
        for v in FLOAT_VALUES:
            np.testing.assert_equal(-v, float(-bfloat16(v)))

    def testAdd(self):
        np.testing.assert_equal(0, float(bfloat16(0) + bfloat16(0)))
        np.testing.assert_equal(1, float(bfloat16(1) + bfloat16(0)))
        np.testing.assert_equal(0, float(bfloat16(1) + bfloat16(-1)))
        np.testing.assert_equal(5.5, float(bfloat16(2) + bfloat16(3.5)))
        np.testing.assert_equal(1.25, float(bfloat16(3.5) + bfloat16(-2.25)))
        np.testing.assert_equal(
            float("inf"), float(bfloat16(float("inf")) + bfloat16(-2.25)))
        np.testing.assert_equal(
            float("-inf"), float(bfloat16(float("-inf")) + bfloat16(-2.25)))
        self.assertTrue(
            math.isnan(float(bfloat16(3.5) + bfloat16(float("nan")))))

    def testAddScalarTypePromotion(self):
        """Tests type promotion against Numpy scalar values."""
        types = [bfloat16, np.float16, np.float32, np.float64, np.longdouble]
        for lhs_type in types:
            for rhs_type in types:
                expected_type = numpy_promote_types(lhs_type, rhs_type)
                actual_type = type(lhs_type(3.5) + rhs_type(2.25))
                self.assertEqual(expected_type, actual_type)

    def testAddArrayTypePromotion(self):
        self.assertEqual(np.float32,
                         type(bfloat16(3.5) + np.array(2.25, np.float32)))
        self.assertEqual(np.float32,
                         type(np.array(3.5, np.float32) + bfloat16(2.25)))

    def testSub(self):
        np.testing.assert_equal(0, float(bfloat16(0) - bfloat16(0)))
        np.testing.assert_equal(1, float(bfloat16(1) - bfloat16(0)))
        np.testing.assert_equal(2, float(bfloat16(1) - bfloat16(-1)))
        np.testing.assert_equal(-1.5, float(bfloat16(2) - bfloat16(3.5)))
        np.testing.assert_equal(5.75, float(bfloat16(3.5) - bfloat16(-2.25)))
        np.testing.assert_equal(
            float("-inf"), float(bfloat16(-2.25) - bfloat16(float("inf"))))
        np.testing.assert_equal(
            float("inf"), float(bfloat16(-2.25) - bfloat16(float("-inf"))))
        self.assertTrue(
            math.isnan(float(bfloat16(3.5) - bfloat16(float("nan")))))

    def testMul(self):
        np.testing.assert_equal(0, float(bfloat16(0) * bfloat16(0)))
        np.testing.assert_equal(0, float(bfloat16(1) * bfloat16(0)))
        np.testing.assert_equal(-1, float(bfloat16(1) * bfloat16(-1)))
        np.testing.assert_equal(-7.875, float(bfloat16(3.5) * bfloat16(-2.25)))
        np.testing.assert_equal(
            float("-inf"), float(bfloat16(float("inf")) * bfloat16(-2.25)))
        np.testing.assert_equal(
            float("inf"), float(bfloat16(float("-inf")) * bfloat16(-2.25)))
        self.assertTrue(
            math.isnan(float(bfloat16(3.5) * bfloat16(float("nan")))))

    def testDiv(self):
        self.assertTrue(math.isnan(float(bfloat16(0) / bfloat16(0))))
        np.testing.assert_equal(float("inf"), float(bfloat16(1) / bfloat16(0)))
        np.testing.assert_equal(-1, float(bfloat16(1) / bfloat16(-1)))
        np.testing.assert_equal(-1.75, float(bfloat16(3.5) / bfloat16(-2)))
        np.testing.assert_equal(
            float("-inf"), float(bfloat16(float("inf")) / bfloat16(-2.25)))
        np.testing.assert_equal(
            float("inf"), float(bfloat16(float("-inf")) / bfloat16(-2.25)))
        self.assertTrue(
            math.isnan(float(bfloat16(3.5) / bfloat16(float("nan")))))

    def testLess(self):
        for v in FLOAT_VALUES:
            for w in FLOAT_VALUES:
                self.assertEqual(v < w, bfloat16(v) < bfloat16(w))

    def testLessEqual(self):
        for v in FLOAT_VALUES:
            for w in FLOAT_VALUES:
                self.assertEqual(v <= w, bfloat16(v) <= bfloat16(w))

    def testGreater(self):
        for v in FLOAT_VALUES:
            for w in FLOAT_VALUES:
                self.assertEqual(v > w, bfloat16(v) > bfloat16(w))

    def testGreaterEqual(self):
        for v in FLOAT_VALUES:
            for w in FLOAT_VALUES:
                self.assertEqual(v >= w, bfloat16(v) >= bfloat16(w))

    def testEqual(self):
        for v in FLOAT_VALUES:
            for w in FLOAT_VALUES:
                self.assertEqual(v == w, bfloat16(v) == bfloat16(w))

    def testNotEqual(self):
        for v in FLOAT_VALUES:
            for w in FLOAT_VALUES:
                self.assertEqual(v != w, bfloat16(v) != bfloat16(w))

    def testNan(self):
        a = np.isnan(bfloat16(float("nan")))
        self.assertTrue(a)
        numpy_assert_allclose(np.array([1.0, a]), np.array([1.0, a]))

        a = np.array(
            [bfloat16(1.34375),
             bfloat16(1.4375),
             bfloat16(float("nan"))],
            dtype=bfloat16)
        b = np.array(
            [bfloat16(1.3359375),
             bfloat16(1.4375),
             bfloat16(float("nan"))],
            dtype=bfloat16)
        numpy_assert_allclose(a,
                              b,
                              rtol=0.1,
                              atol=0.1,
                              equal_nan=True,
                              err_msg="",
                              verbose=True)

    def testSort(self):
        values_to_sort = np.float32(FLOAT_VALUES)
        sorted_f32 = np.sort(values_to_sort)
        sorted_bf16 = np.sort(values_to_sort.astype(bfloat16))  # pylint: disable=too-many-function-args
        np.testing.assert_equal(sorted_f32, np.float32(sorted_bf16))

    def testArgmax(self):
        values_to_sort = np.float32(bfloat16(np.float32(FLOAT_VALUES)))
        argmax_f32 = np.argmax(values_to_sort)
        argmax_bf16 = np.argmax(values_to_sort.astype(bfloat16))  # pylint: disable=too-many-function-args
        np.testing.assert_equal(argmax_f32, argmax_bf16)

    def testArgmaxOnNan(self):
        """Ensures we return the right thing for multiple NaNs."""
        one_with_nans = np.array(
            [1.0, float("nan"), float("nan")], dtype=np.float32)
        np.testing.assert_equal(np.argmax(one_with_nans.astype(bfloat16)),
                                np.argmax(one_with_nans))

    def testArgmaxOnNegativeInfinity(self):
        """Ensures we return the right thing for negative infinities."""
        inf = np.array([float("-inf")], dtype=np.float32)
        np.testing.assert_equal(np.argmax(inf.astype(bfloat16)),
                                np.argmax(inf))

    def testArgmin(self):
        values_to_sort = np.float32(bfloat16(np.float32(FLOAT_VALUES)))
        argmin_f32 = np.argmin(values_to_sort)
        argmin_bf16 = np.argmin(values_to_sort.astype(bfloat16))  # pylint: disable=too-many-function-args
        np.testing.assert_equal(argmin_f32, argmin_bf16)

    def testArgminOnNan(self):
        """Ensures we return the right thing for multiple NaNs."""
        one_with_nans = np.array(
            [1.0, float("nan"), float("nan")], dtype=np.float32)
        np.testing.assert_equal(np.argmin(one_with_nans.astype(bfloat16)),
                                np.argmin(one_with_nans))

    def testArgminOnPositiveInfinity(self):
        """Ensures we return the right thing for positive infinities."""
        inf = np.array([float("inf")], dtype=np.float32)
        np.testing.assert_equal(np.argmin(inf.astype(bfloat16)),
                                np.argmin(inf))

    def testDtypeFromString(self):
        assert np.dtype("bfloat16") == np.dtype(bfloat16)
示例#49
0
    def interpolate(self):
        i0_copy = np.copy(self.i0)
        iflu_copy = np.copy(self.iflu)
        ir_copy = np.copy(self.ir)
        energy_copy = np.copy(self.energy)
        i0_interp = []
        iflu_interp = []
        ir_interp = []
        energy_interp = []
        trigger_interp = []
        timestamps = []

        for i in range(len(self.trigger) - 1):

            condition1 = (self.trigger[i, 0] <= i0_copy[:, 0]) == (
                self.trigger[i + 1, 0] > i0_copy[:, 0])
            interval1 = np.extract(condition1, i0_copy[:, 0])

            condition2 = (self.trigger[i, 0] <= iflu_copy[:, 0]) == (
                self.trigger[i + 1, 0] > iflu_copy[:, 0])
            interval2 = np.extract(condition2, iflu_copy[:, 0])

            condition3 = (self.trigger[i, 0] <= energy_copy[:, 0]) == (
                self.trigger[i + 1, 0] > energy_copy[:, 0])
            interval3 = np.extract(condition3, energy_copy[:, 0])

            condition4 = (self.trigger[i, 0] <= ir_copy[:, 0]) == (
                self.trigger[i + 1, 0] > ir_copy[:, 0])
            interval4 = np.extract(condition4, ir_copy[:, 0])

            if len(interval1) and len(interval2) and len(interval3):
                interval_mean_i0 = np.mean(
                    np.extract(condition1, i0_copy[:, 1]))
                i0_interp.append(interval_mean_i0)
                i0_pos_high = np.where(
                    i0_copy[:, 0] == interval1[len(interval1) - 1])[0][0]
                i0_copy = i0_copy[i0_pos_high + 1:len(i0_copy)]

                interval_mean_iflu = np.mean(
                    np.extract(condition2, iflu_copy[:, 1]))
                iflu_interp.append(interval_mean_iflu)
                iflu_pos_high = np.where(
                    iflu_copy[:, 0] == interval2[len(interval2) - 1])[0][0]
                iflu_copy = iflu_copy[iflu_pos_high + 1:len(iflu_copy)]

                interval_mean_energy = np.mean(
                    np.extract(condition3, energy_copy[:, 1]))
                energy_interp.append(interval_mean_energy)
                energy_pos_high = np.where(
                    energy_copy[:, 0] == interval3[len(interval3) - 1])[0][0]
                energy_copy = energy_copy[energy_pos_high + 1:len(energy_copy)]

                interval_mean_ir = np.mean(
                    np.extract(condition4, ir_copy[:, 1]))
                ir_interp.append(interval_mean_ir)
                ir_pos_high = np.where(
                    ir_copy[:, 0] == interval4[len(interval4) - 1])[0][0]
                ir_copy = ir_copy[ir_pos_high + 1:len(ir_copy)]

                timestamps.append(
                    (self.trigger[i, 0] + self.trigger[i + 1, 0]) / 2)
                trigger_interp.append(self.trigger[i, 1])

        self.i0_interp = np.array([timestamps, i0_interp]).transpose()
        self.iflu_interp = np.array([timestamps, iflu_interp]).transpose()
        self.ir_interp = np.array([timestamps, ir_interp]).transpose()
        #self.it_interp = np.copy(self.iflu_interp)
        self.energy_interp = np.array([timestamps, energy_interp]).transpose()
        self.trigger_interp = np.array([timestamps,
                                        trigger_interp]).transpose()
示例#50
0
def compute_transport(timeavg, mesh, mask, name='Drake Passage',output='transport.nc'):
  mesh = xr.open_dataset(mesh)
  mask = get_mask_short_names(xr.open_dataset(mask))

  if name.lower() == 'all':
    transectList = mask.shortNames[:].values
    condition = transectList != "Atlantic Transec"
    transectList = np.extract(condition, transectList)
  else:
    transectList = name.split(',')
    if platform.release()[0] == '3':
      for i in range(len(transectList)):
        transectList[i] = "b'" + transectList[i]

  print('Computing Transport for the following transects ',transectList)
  nTransects = len(transectList)
  maxEdges = mask.dims['maxEdgesInTransect']
# create empty t list for time
  t = []
# Compute refLayerThickness to avoid need for hist file
  refBottom = mesh.refBottomDepth.values
  nz = mesh.dims['nVertLevels']
  h = np.zeros(nz)
  h[0] = refBottom[0]
  for i in range(1,nz):
    h[i] = refBottom[i] - refBottom[i-1]

# Get a list of edges and total edges in each transect
  nEdgesInTransect = np.zeros(nTransects)
  edgeVals = np.zeros((nTransects,maxEdges))
  for i in range(nTransects):
    amask = mask.sel(shortNames=transectList[i]).squeeze()
    transectEdges = amask.transectEdgeGlobalIDs.values
    inds = np.where(transectEdges > 0)[0]
    nEdgesInTransect[i] = len(inds)
    transectEdges = transectEdges[inds]
    edgeVals[i,:len(inds)] = np.asarray(transectEdges-1, dtype='i')

  nEdgesInTransect = np.asarray(nEdgesInTransect, dtype='i')

# Create a list with the start and stop for transect bounds
  nTransectStartStop = np.zeros(nTransects+1)
  for j in range(1,nTransects+1):
    nTransectStartStop[j] = nTransectStartStop[j-1] + nEdgesInTransect[j-1]

  edgesToRead = edgeVals[0,:nEdgesInTransect[0]]
  for i in range(1,nTransects):
    edgesToRead = np.hstack([edgesToRead,edgeVals[i,:nEdgesInTransect[i]]])

  edgesToRead = np.asarray(edgesToRead, dtype='i')
  dvEdge = mesh.dvEdge.sel(nEdges=edgesToRead).values
  edgeSigns = np.zeros((nTransects,len(edgesToRead)))
  for i in range(nTransects):
    edgeSigns[i,:] = mask.sel(nEdges=edgesToRead, shortNames=transectList[i]).squeeze().transectEdgeMaskSigns.values

# Read time average files one at a time and slice
  fileList = sorted(glob.glob(timeavg))
  transport = np.zeros((len(fileList),nTransects))
  t = np.zeros(len(fileList))
  for i,fname in enumerate(fileList):
    ncid = Dataset(fname,'r')
    if 'timeMonthly_avg_normalTransportVelocity' in ncid.variables.keys():
      vel = ncid.variables['timeMonthly_avg_normalTransportVelocity'][0,edgesToRead,:]
    elif 'timeMonthly_avg_normalVelocity' in ncid.variables.keys():
      vel = ncid.variables['timeMonthly_avg_normalVelocity'][0,edgesToRead,:]
      if 'timeMonthly_avg_normalGMBolusVelocity' in ncid.variables.keys():
        vel += ncid.variables['timeMonthly_avg_normalGMBolusVelocity'][0,edgesToRead,:]
    else:
      raise KeyError('no appropriate normalVelocity variable found')
    t[i] = ncid.variables['timeMonthly_avg_daysSinceStartOfSim'][:] / 365.
    ncid.close()
#   Compute transport for each transect
    for j in range(nTransects):
      start = int(nTransectStartStop[j])
      stop = int(nTransectStartStop[j+1])
      transport[i,j] = (dvEdge[start:stop,np.newaxis]*h[np.newaxis,:]*vel[start:stop,:] \
          *edgeSigns[j,start:stop,np.newaxis]).sum()*m3ps_to_Sv

# Define some dictionaries for transect plotting
  obsDict = {'Drake Passage':[120,175],'Tasmania-Ant':[147,167],'Africa-Ant':None,'Antilles Inflow':[-23.1,-13.7], \
          'Mona Passage':[-3.8,-1.4],'Windward Passage':[-7.2,-6.8],'Florida-Cuba':[30,33],'Florida-Bahamas':[30,33], \
          'Indonesian Throughflow':[-21,-11],'Agulhas':[-90,-50],'Mozambique Channel':[-20,-8], \
          'Bering Strait':[0.17,1.49],'Lancaster Sound':None,'Fram Strait':None,'Robeson Channel':None,'Nares Strait':None}
  labelDict = {'Drake Passage':'drake','Tasmania-Ant':'tasmania','Africa-Ant':'africaAnt','Antilles Inflow':'Antilles', \
          'Mona Passage':'monaPassage','Windward Passage':'windwardPassage','Florida-Cuba':'floridaCuba',\
             'Florida-Bahamas':'floridaBahamas', \
          'Indonesian Throughflow':'indonesia','Agulhas':'agulhas','Mozambique Channel':'mozambique', \
          'Bering Strait':'beringstrait','Lancaster Sound':'lancaster','Fram Strait':'fram','Robeson Channel':'robeson','Nares Strait':'nares'}

  for i in range(nTransects):
    plt.figure()
    if platform.release()[0]=='3':
        searchString = transectList[i][2:]
    else:
        searchString = transectList[i]
    bounds = obsDict[searchString]
    title = 'Transport for '+searchString
    plt.plot(t,transport[:,i],'k',linewidth=2)
    if bounds is not None:
        plt.gca().fill_between(t, bounds[0]*np.ones_like(t), bounds[1]*np.ones_like(t), alpha=0.3, label='observations')
    plt.ylabel('Transport (Sv)',fontsize=32)
    plt.xlabel('Time (Years)',fontsize=32)
    plt.title(title,fontsize=32)
    plt.savefig('transport_'+labelDict[searchString]+'.png')

# Save the transport time series to a netCDF file so it can be built up over multiple runs
  ncid=Dataset(output,mode='w',clobber=True, format='NETCDF3_CLASSIC')
  ncid.createDimension('Time',None)
  ncid.createDimension('nTransects',nTransects)
  ncid.createDimension('StrLen',64)
  transectNames=ncid.createVariable('TransectNames','c',('nTransects','StrLen'))
  times=ncid.createVariable('Time','f8','Time')
  transportOut=ncid.createVariable('Transport','f8',('Time','nTransects'))

  times[:] = t
  transportOut[:,:] = transport

  for i in range(nTransects):
    nLetters = len(transectList[i])
    transectNames[i,:nLetters] = transectList[i]
  ncid.close()
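For orientation, the core of the transport loop above collapses edge length, layer thickness, normal velocity, and edge orientation into a single number per transect. A minimal sketch of that reduction with made-up toy arrays (the names mirror the function, but every value here is an assumption for illustration):

import numpy as np

m3ps_to_Sv = 1.0e-6                         # 1 Sverdrup = 1e6 m^3/s
dvEdge = np.array([1000.0, 1500.0])         # edge lengths (m) for one transect
h = np.array([10.0, 20.0, 30.0])            # layer thicknesses (m)
vel = np.ones((2, 3)) * 0.05                # normal velocity (m/s), edges x levels
edgeSigns = np.array([1.0, -1.0])           # orientation of each edge

transport = (dvEdge[:, np.newaxis] * h[np.newaxis, :] * vel
             * edgeSigns[:, np.newaxis]).sum() * m3ps_to_Sv
print(transport)                            # cross-sectional volume transport in Sv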
示例#51
0
def show_recall(y, y_hat):
    # print y_hat[y == 1]
    print(
        'Recall rate: %.2f%%' %
        (100 * float(np.sum(y_hat[y == 1] == 1)) / np.extract(y == 1, y).size))
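A tiny usage example with assumed toy arrays: three actual positives, two of which are predicted correctly, giving a recall of 66.67%.

import numpy as np

y = np.array([1, 0, 1, 1, 0])        # ground-truth labels
y_hat = np.array([1, 1, 0, 1, 0])    # predictions
show_recall(y, y_hat)                # prints a recall of 66.67%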
示例#52
0
    def rotate_IQ_plane(self, entries=None):
        '''
        The data optimizer is fed with microwave data of both quadratures, typically amplitude and phase.
        It effectively performs a principal axis transformation in the complex plane of all data points,
        which lie along a line for a typical projective qubit state measurement without single-shot
        fidelity using an ADC acquisition card. The data optimizer then returns a projection along the
        in-line quadrature direction, which contains the maximum information. Ideally, no information is
        lost, as no data is left in the orthogonal quadrature.

        The algorithm locates one of the edge data points at either end of the line-shaped data point
        distribution by summing and maximizing the mutual distances between data points. It then calculates
        the distance of each data point with respect to this distinct extremal data point.

        We assume that the possible outcomes of a strong, projective quantum measurement, i.e. the qubit
        states |0>, |1>, result in two distinct locations in the complex plane, corresponding to signatures
        of the dispersive readout resonator. In the absence of single-shot readout fidelity, the pre-averaging
        performed by the ADC card in the complex plane leads to counts in between the piles denoting |0>
        and |1>. The points along this line are spread by statistical measurement noise and ultimately
        by the quantum uncertainty. Since the averaging takes place in the complex plane, all points recorded
        in such a measurement should lie on a line. In case of a phase-sensitive reflection measurement
        at a cavity with a high internal quality factor, points may instead span a segment of a circle,
        caused by noisy measurements of the resonator position away from the two quantum positions.

        The line shape of the data distribution generally justifies projecting the measurement data onto an
        arbitrary axis without altering the internal data shape.

        Errors are calculated as projections onto the axis along which the complex data points are aligned.
        Data is normalized prior to returning.

        Requires coordinate, amplitude, and phase data to be specified via the entries keyword.
        self.amplitude and self.phase can be lists of vectors or a single (averaged) data set.
        '''

        if self.file_name[-2:] == 'h5':
            self.discover_hdf_data(entries=entries)
            if len(self.urls) != 3:
                logging.error('Invalid entry specification. Aborting.')
                return
            self.coordinate, self.amplitude, self.phase = self.read_hdf_data(
                return_data=True)[0]
        else:
            logging.warning('Reading out amplitude and phase attributes.')

        #generate complex data array
        self.c_raw = np.array(self.amplitude) * np.exp(
            1j * np.array(self.phase))

        if len(self.c_raw.shape) > 1:
            #calculate mean of complex data
            c = np.mean(self.c_raw, axis=0)
        else:  #given data is already averaged
            c = self.c_raw

        #point in complex plane with maximum summed mutual distances
        s = np.zeros_like(np.abs(c))
        for i in range(len(c)):
            for p in c:
                s[i] += np.abs(p - c[i])
        cmax = np.extract(s == np.max(s), c)

        #calculate distances
        data_opt = np.abs(c - cmax)

        if len(self.c_raw.shape) > 1:  #calculate errors in line direction
            #find maximum complex point in maximum distance
            d = 0
            for p in c:
                if np.abs(p - cmax) > d:
                    d = np.abs(p - cmax)
                    cdist = p
            #find unit vector in line direction
            vunit = (cdist - cmax) / np.abs(cdist - cmax)

            #calculate projected distances via dot product, projecting along the data direction
            #errors via std
            dist_proj = [0] * len(c)
            errs = np.zeros_like(c)
            for i, ci in enumerate(self.c_raw.T):  #for each iteration
                dist_proj[i] = [
                    np.abs(
                        np.vdot(
                            [np.real(vunit), np.imag(vunit)], [
                                np.real(cr) - np.real(c[i]),
                                np.imag(cr) - np.imag(c[i])
                            ])) for cr in ci
                ]
                errs[i] = np.std(dist_proj[i]) / np.sqrt(len(dist_proj[i]))

        #normalize optimized data
        data_opt -= np.min(data_opt)
        maxv = np.max(data_opt)
        data_opt /= maxv
        if len(self.c_raw.shape) > 1:
            errs /= maxv

        #gauss plane plot
        if self.cfg['show_complex_plot'] and self.cfg['matplotlib']:
            if len(self.c_raw.shape) > 1:
                plt.figure(figsize=(10, 13))
                ax1 = plt.subplot2grid((4, 1), (0, 0))
                ax2 = plt.subplot2grid((4, 1), (1, 0), rowspan=3)
                ax1.errorbar(data_opt,
                             np.zeros_like(data_opt),
                             xerr=np.real(errs),
                             color='blue',
                             fmt='o',
                             elinewidth=0.8,
                             capsize=5,
                             markersize=8,
                             ecolor='red')
                ax1.plot([0], [0], '*', color='red', markersize=20)
                prange = np.max(data_opt) - np.min(data_opt)
                ax1.set_xlim(
                    np.min(data_opt) - 0.05 * prange,
                    np.max(data_opt) + 0.05 * prange)

                ax2.plot(np.real(c), np.imag(c), '.')
                ax2.plot(np.real(c)[:10], np.imag(c)[:10], '.',
                         color='r')  #show first 10 data points in red
                ax2.plot(np.real(cmax),
                         np.imag(cmax),
                         '*',
                         color='black',
                         markersize=15)
                self.errors = np.real(np.array(errs))
            else:
                plt.figure(figsize=(10, 10))
                plt.plot(np.real(c), np.imag(c), '.')
                plt.plot(np.real(c)[:10], np.imag(c)[:10], '.',
                         color='r')  #show first 10 data points in red
                plt.plot(np.real(cmax),
                         np.imag(cmax),
                         '*',
                         color='black',
                         markersize=15)

                self.errors = None

        self.data = np.real(np.array(data_opt))
        self._save_opt_data()
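The docstring of rotate_IQ_plane describes the method in prose; below is a minimal, self-contained sketch of the same idea on synthetic line-shaped complex data (everything here is assumed for illustration and bypasses the class's HDF and plotting machinery): locate the extremal point by maximizing summed mutual distances, then use the distance to that point as the optimized quadrature.

import numpy as np

rng = np.random.default_rng(0)
# synthetic data points lying on a line in the complex plane, plus a little noise
c = (0.2 + 0.1j) + np.linspace(0, 1, 200) * (0.5 + 0.3j)
c += 0.01 * (rng.standard_normal(200) + 1j * rng.standard_normal(200))

s = np.abs(c[:, None] - c[None, :]).sum(axis=1)   # summed mutual distances
cmax = c[np.argmax(s)]                            # extremal (anchor) point

data_opt = np.abs(c - cmax)                       # projection along the line
data_opt -= data_opt.min()
data_opt /= data_opt.max()                        # normalize, as in the method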
示例#53
0
File: RBS.py  Project: dhyang33/vision
def BTEblkEstimate(blkIm, airlight, lamb, fTrans):
    global ld
    le = []
    Trans = 0.0
    nTrans = np.floor(1.0 / fTrans * 128)
    fMinCost = sys.maxsize
    # or 9223372036854775807
    numberOfPixels = blkIm.shape[0] * blkIm.shape[1] * blkIm.shape[2]
    nCounter = 0
    bgr = cv2.split(blkIm)
    while (nCounter < (1 - fTrans) * 10):
        color = copy.deepcopy(bgr[0])
        bChannel = BTEpreDehaze(color, airlight[0], nTrans)
        #print("bChannel "+str(bChannel[0,0]))
        if ld:
            ld = False
            le = bChannel
            guess = False

        color = copy.deepcopy(bgr[1])
        gChannel = BTEpreDehaze(color, airlight[1], nTrans)

        color = copy.deepcopy(bgr[2])
        rChannel = BTEpreDehaze(color, airlight[2], nTrans)

        nSumOfLoss = 0

        condition = bChannel > 255
        condExtract = np.array(np.extract(condition, bChannel))
        condExtract = np.sum((condExtract - 255)**2)
        nSumOfLoss += condExtract
        condition = bChannel < 0
        condExtract = np.array(np.extract(condition, bChannel))
        nSumOfLoss += np.sum(condExtract**2)

        condition = gChannel > 255
        condExtract = np.array(np.extract(condition, gChannel))
        condExtract = np.sum((condExtract - 255)**2)
        nSumOfLoss += condExtract
        condition = gChannel < 0
        condExtract = np.array(np.extract(condition, gChannel))
        nSumOfLoss += np.sum(condExtract**2)

        condition = rChannel > 255
        condExtract = np.array(np.extract(condition, rChannel))
        condExtract = np.sum((condExtract - 255)**2)
        nSumOfLoss += condExtract  # include the red-channel overflow loss, matching blue/green
        condition = rChannel < 0
        condExtract = np.array(np.extract(condition, rChannel))
        nSumOfLoss += np.sum(condExtract**2)

        nSumOfSquareOuts = np.sum(np.multiply(bChannel, bChannel)) + np.sum(
            np.multiply(gChannel, gChannel)) + np.sum(
                np.multiply(rChannel, rChannel))
        nSumOfOuts = np.sum(bChannel) + np.sum(gChannel) + np.sum(rChannel)
        fMean = nSumOfOuts / numberOfPixels
        fCost = lamb * nSumOfLoss / numberOfPixels - \
            (nSumOfSquareOuts/numberOfPixels - fMean**2)
        if nCounter == 0 or fMinCost > fCost:
            fMinCost = fCost
            Trans = fTrans
        fTrans = fTrans + 0.1
        nTrans = 1.0 / fTrans * 128.0
        nCounter = nCounter + 1
    return Trans
示例#54
0
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import optimize

import mini1.ddm_sampler as ddm

data_file = './data/dots_psychophysics.txt'
tdata = pd.read_csv(data_file, delimiter=' ', skipinitialspace=True, index_col=None, header=None, names=['coherence', 'direction', 'choice', 'rewarded', 'rt'])
tdata = tdata.values
scalar_rt = tdata[:,4]/10
tdata[:,4] = scalar_rt.astype(np.int)
coherence_values = np.unique(tdata[:,0])

plt.hist(np.extract(tdata[:,0]==0.724, tdata[:, 4]), bins=100);plt.show()

#res = optimize.minimize(ddm.ddm_pdf, np.array([0.0015, 0.03]), args=(tdata[:,4].astype(np.int),))
#print(res.x)
#print(res.success)
#print(res.message)

res = optimize.minimize(ddm.ddm_pdf, np.array([0.0015, 0.03]), args=(fake_data,), method = 'Nelder-Mead', tol=0.05, options={'maxiter':200}); print(res)

# final_simplex: (array([[0.00177656, 0.05034375],
#       [0.00168281, 0.05371875],
#       [0.00159844, 0.04715625]]), array([5.27673636, 5.28378169, 5.29123604]))
#           fun: 5.276736355372437
#       message: 'Optimization terminated successfully.'
#          nfev: 15
#           nit: 7
#        status: 0
#       success: True
示例#55
0
def get_indices(valid_mask: np.ndarray) -> np.ndarray:
    return np.extract(valid_mask, np.array(range(valid_mask.size)))
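An assumed usage example; np.flatnonzero gives the same result as a one-liner:

import numpy as np

mask = np.array([True, False, True, True, False])
print(get_indices(mask))       # [0 2 3]
print(np.flatnonzero(mask))    # [0 2 3], equivalent built-in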
示例#56
0
def findOptRoute(possible_locations, D, start, elevation_change_indicator):
    possible_locations, distances_to_home = removeDistantSites(
        possible_locations, start, D)
    if len(possible_locations) > 24:
        # distances_to_start_direct=getMatrixOneDirect(possible_locations,start)
        #         direct_to_walk_frac=np.min(np.true_divide(distances_to_start_direct,distances_to_home[1:]))
        # print direct_to_walk_frac
        #         print np.mean(np.true_divide(distances_to_start_direct,distances_to_home[1:]))
        matrix = getMatrixDirect(np.concatenate((start, possible_locations)))
    else:
        matrix = getMatrixMQ(np.concatenate((start, possible_locations)))

    numbers_seen, paths, path_distances = getAllRoutes(matrix, D)

    # Pick best route and rank the rest
    try:
        opt_routes_ind = np.argwhere(
            numbers_seen == np.amax(numbers_seen)).flatten().tolist()
    except:
        a = 0
        return a, a, a, a, a, a, a, a, a, a, a, a, a
    opt_route_max_dist_ind = opt_routes_ind[np.argmax(
        np.array(path_distances)[opt_routes_ind])]
    opt_path = paths[opt_route_max_dist_ind]
    paths = np.extract(numbers_seen > 0, paths)
    path_distances = np.extract(numbers_seen > 0, path_distances)
    numbers_seen = np.extract(numbers_seen > 0, numbers_seen)
    paths_sorted_ind = np.argsort(numbers_seen)[::-1]
    numbers_seen_sorted = numbers_seen[paths_sorted_ind]
    distances_sorted = path_distances[paths_sorted_ind]
    paths_sorted = paths[paths_sorted_ind]
    percent_diff_distance = np.true_divide(np.abs(distances_sorted - D), D)

    #Adjust path ranking if flat elevation desired
    if elevation_change_indicator == 1:
        #Don't want to add a point too far because will change elevation when adding a point
        numbers_seen_sorted = np.extract(percent_diff_distance < 0.15,
                                         numbers_seen_sorted)
        paths_sorted = np.extract(percent_diff_distance < 0.15, paths_sorted)
        distances_sorted = np.extract(percent_diff_distance < 0.15,
                                      distances_sorted)

        elevation_rank, mean_elevation_change, max_elevation_change = rankElevations(
            possible_locations, paths_sorted, start)
        paths_sorted = paths_sorted[elevation_rank]
        opt_path = paths_sorted[0]

    # Get the locations along the route
    opt_route_locations = np.array(start)
    for j in opt_path:
        x = np.reshape(
            possible_locations[j - 1],
            (1,
             2))  #Because j was calculated from array with starting point at 0
        opt_route_locations = np.concatenate((opt_route_locations, x))
    opt_route_locations = np.concatenate((opt_route_locations, start))

    direction_coordinates, narratives, distance = getFinalRoute(
        opt_route_locations)

    if distance < 0.9 * D:
        opt_route_locations, direction_coordinates, narratives, distance, added_indicator = addPoint(
            opt_route_locations, D, distance)
    else:
        added_indicator = 0
    elevations, distances_el = getElevationMQ(direction_coordinates[0])
    mean_elevation_change = np.mean(
        np.true_divide(np.abs(np.diff(elevations)), distances_el[1:]))
    max_elevation_change = np.max(
        np.true_divide(np.abs(np.diff(elevations)), distances_el[1:]))

    return opt_route_locations, distance, direction_coordinates, narratives, added_indicator, paths_sorted, distances_sorted, numbers_seen_sorted, possible_locations, mean_elevation_change, max_elevation_change, elevations, distances_el
示例#57
0
def main(lata, longa, latb, longb):
    lata = conv.convert_gps(float(lata))
    longa = conv.convert_gps(float(longa))
    latb = conv.convert_gps(float(latb))
    longb = conv.convert_gps(float(longb))
    try:
        from pylibfreenect2 import OpenGLPacketPipeline
        pipeline = OpenGLPacketPipeline()
    except:
        try:
            from pylibfreenect2 import OpenCLPacketPipeline
            pipeline = OpenCLPacketPipeline()
        except:
            from pylibfreenect2 import CpuPacketPipeline
            pipeline = CpuPacketPipeline()
    print("Packet pipeline:", type(pipeline).__name__)
    #create list for gps location of obstacles
    obs = []

    ###############################################################

    # Create and set logger
    logger = createConsoleLogger(LoggerLevel.Debug)
    setGlobalLogger(logger)

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # NOTE: must be called after device.start()
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # Optional parameters for registration
    # set to True if you need them
    need_bigdepth = False
    need_color_depth_map = False

    bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
    color_depth_map = np.zeros((424, 512),  np.int32).ravel() \
     if need_color_depth_map else None

    while True:
        frames = listener.waitForNewFrame()
        #time.sleep(5)
        color = frames["color"]
        ir = frames["ir"]
        depth = frames["depth"]

        registration.apply(color,
                           depth,
                           undistorted,
                           registered,
                           bigdepth=bigdepth,
                           color_depth_map=color_depth_map)

        # NOTE for visualization:
        # cv2.imshow without OpenGL backend seems to be quite slow to draw all
        # things below. Try commenting out some imshow if you don't have a fast
        # visualization backend.
        #cv2.imshow("ir", ir.asarray() / 65535.)
        #time.sleep(2)
        cv2.imshow("depth", depth.asarray() / 4500.)

        #Comment the coming lines
        print('center value is:', depth.asarray(np.float32).item((212, 256)))
        print('down1 value is:', depth.asarray(np.float32).item((270, 256)))
        '''print('up1 value is:',depth.asarray(np.float32).item((150,256)))
		print('up2 value is:',depth.asarray(np.float32).item((100,256)))
		
		print('down2 value is:',depth.asarray(np.float32).item((350,256)))
		print('right1 value is:',depth.asarray(np.float32).item((212,300)))
		print('right2 value is:',depth.asarray(np.float32).item((212,350)))
		print('left1 value is:',depth.asarray(np.float32).item((212,200)))
		print('left2 value is:',depth.asarray(np.float32).item((212,150)))'''

        x = depth.asarray(np.float32)
        y = np.logical_and(np.greater(x, 1200), np.less(x, 1500))
        #print(np.extract(y, x))
        no_of_pixels = np.count_nonzero(np.extract(y, x))
        print(no_of_pixels)
        #get gps coordinate at this location########
        '''
        assuming the gps received is lat,long variables:
        obs.append([lat,long])
        this is to be retrieved in the path planning code.
        '''
        if no_of_pixels > 14000:  #approx distance from the camera = 1.5 m
            print('big Obstacle found, stop!!!')
            obs.append(findPosition())
        elif no_of_pixels > 8000:  #approx distance from the camera = 1.5 m
            print('small Obstacle found!!')
            obs.append(findPosition())
        #cv2.imshow("color", cv2.resize(color.asarray(),(int(1920 / 3), int(1080 / 3))))
        #cv2.imshow("registered", registered.asarray(np.uint8))

        if need_bigdepth:
            cv2.imshow(
                "bigdepth",
                cv2.resize(bigdepth.asarray(np.float32),
                           (int(1920 / 3), int(1082 / 3))))
        if need_color_depth_map:
            cv2.imshow("color_depth_map", color_depth_map.reshape(424, 512))

        listener.release(frames)
        #time.sleep(20)
        key = cv2.waitKey(delay=1) & 0xFF
        if key == ord('q'):
            break

    #######################################################

    target(latb, longb, lata, longa)
    #stage3.py main  fnc
    #######################################################
    device.stop()
    device.close()
示例#58
0
    def segment(self, frame):

        blur = cv2.medianBlur(frame, 51)

        # Get red channel
        r = blur.copy()
        r[:, :, 0] = 0
        r[:, :, 1] = 0

        # Convert to greyscale
        grey = self.bgr_2_grey(r)

        # Down sample 1/2 x 1/2
        scale = 2
        w = int(grey.shape[1] / scale)
        h = int(grey.shape[0] / scale)

        half = cv2.pyrDown(grey, dstsize=(w, h))
        img = half.copy()

        # Largest grey value
        L = img.max()

        # mean grey value
        muT = np.mean(img)

        # Number of pixels
        N = img.size

        Gv = cv2.Sobel(img, cv2.CV_32F, 1, 0, ksize=5)
        Gh = cv2.Sobel(img, cv2.CV_32F, 0, 1, ksize=5)
        g = np.sqrt((Gh**2) + (Gv**2))
        gmean = np.mean(g)
        if gmean == 0:
            return frame

        h = np.zeros(L)
        h_prime = np.zeros(L)
        w = np.zeros(L)
        sigma_T = 0
        maxKt = 0
        max_eta = 0
        optimal_thresh = 0
        final = img.copy()
        for i in xrange(0, L):
            # Number of pixels at threshold i
            h[i] = len(np.extract(img == i, img))
            if h[i] == 0:
                h[i] = 1

            w[i] = (L / 1) * (1 / gmean)
            h_prime[i] = w[i] * h[i]
            sigma_T = sigma_T + ((i - muT)**2) * (h_prime[i] / N)

        for t in xrange(0, L):
            lambda_t = (-t + L) * (1 / gmean)

            # Classes
            C0 = np.extract(img <= t, img)
            C1 = np.extract(img > t, img)
            if C0.any() and C1.any():

                # mean of classes
                mu_0 = np.mean(C0)
                mu_1 = np.mean(C1)

                omega_0 = float(len(C0)) / float(N)
                omega_1 = float(len(C1)) / float(N)

                sigma_B = (omega_0 * omega_1) * ((mu_1 - mu_0)**2)

                eta_t = sigma_B / sigma_T
                kt = lambda_t * eta_t
                if kt > maxKt:
                    maxKt = kt
                    optimal_thresh = t

                # Threshold using calculated threshold value (optimal threshold)
        temp_img = img.copy()

        w = int(temp_img.shape[1] * scale)
        h = int(temp_img.shape[0] * scale)

        # Scale up to original size
        temp_img = cv2.pyrUp(temp_img, dstsize=(w, h))

        ret, mask = cv2.threshold(temp_img, optimal_thresh, 255,
                                  cv2.THRESH_BINARY)

        # Invert mask to the region we want (the lumen)
        mask_inv = cv2.bitwise_not(mask)

        # Get masked images
        # colour_masked = cv2.bitwise_and(crop,crop,mask = mask_inv)
        grey_masked = cv2.bitwise_and(temp_img, temp_img, mask=mask_inv)

        # Find contours in masked image and get largest region
        contours = cv2.findContours(grey_masked.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]
        mask = np.ones(frame.shape[:2], dtype="uint8") * 255
        c = max(contours, key=cv2.contourArea)

        # Mask to leave only the largest thresholded region
        mask_cnt = cv2.drawContours(mask, [c], -1, 0, -1)
        mask_cnt_inv = cv2.bitwise_not(mask_cnt)
        LRC = cv2.bitwise_and(frame, frame, mask=mask_cnt_inv)

        ## Zabulis et al.
        smax = 0
        for i in contours:
            A = cv2.contourArea(i)

            if A != 0 and cv2.arcLength(i, True) != 0:
                C = A / cv2.arcLength(i, True)

                mask = np.ones(grey.shape[:2], dtype="uint8") * 255
                mask_cnt = cv2.drawContours(mask, [i], -1, 0, -1)
                mask_inv = cv2.bitwise_not(mask_cnt)
                colour_masked = cv2.bitwise_and(frame, frame, mask=mask_inv)

                mI = cv2.mean(grey, mask=mask_inv)
                I = 1 + (1 - mI[0])

                S = (I**2) * C * A
                if S > smax:
                    smax = S
                    LRC = colour_masked.copy()
                    LRC[mask == 255] = (255, 255, 255)

        return LRC
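The threshold search above is a gradient-weighted variant of Otsu's criterion. For reference, a stripped-down sketch of the unweighted between-class-variance search on assumed toy data:

import numpy as np

img = np.array([10, 12, 11, 200, 198, 202, 13, 199], dtype=np.uint8)
N = img.size
best_t, best_sigma_B = 0, 0.0
for t in range(int(img.min()), int(img.max())):
    C0, C1 = np.extract(img <= t, img), np.extract(img > t, img)
    if C0.size and C1.size:
        omega_0, omega_1 = C0.size / N, C1.size / N        # class weights
        sigma_B = omega_0 * omega_1 * (C1.mean() - C0.mean()) ** 2
        if sigma_B > best_sigma_B:
            best_sigma_B, best_t = sigma_B, t
print(best_t)   # 13: separates the dark and bright clusters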
示例#59
0
    img1 = io.imread("Supervised/food_pics/food_pics/" + folder + "/" +
                     files[0])
    img2 = io.imread("Supervised/food_pics/food_pics/" + folder + "/" +
                     files[1])
    grey1, grey2 = color.rgb2grey(img1), color.rgb2grey(img2)
    cond1, cond2 = grey1 != 0, grey2 != 0
    img1_src = io.imread("Supervised/food_pics/food_pics_source/" + folder +
                         "/" + files_src[0])
    img2_src = io.imread("Supervised/food_pics/food_pics_source/" + folder +
                         "/" + files_src[1])
    if img1.shape != img1_src.shape:
        img_copy = img2_src.copy()
        img2_src = img1_src
        img1_src = img_copy
    print(folder)
    img1_r1, img1_r2 = np.extract(cond1, img1_src), np.extract(
        np.invert(cond1), img1_src)
    img2_r1, img2_r2 = np.extract(cond2, img2_src), np.extract(
        np.invert(cond2), img2_src)
    greyimg1, greyimg2 = color.rgb2grey(img1_src), color.rgb2grey(img2_src)
    greyimg1_r1, greyimg1_r2 = np.extract(cond1, greyimg1), np.extract(
        np.invert(cond1), greyimg1)
    greyimg2_r1, greyimg2_r2 = np.extract(cond2, greyimg2), np.extract(
        np.invert(cond2), greyimg2)
    hsvimg1, hsvimg2 = color.rgb2hsv(img1_src), color.rgb2hsv(img2_src)
    hsvimg1_r1, hsvimg1_r2 = np.extract(cond1, hsvimg1), np.extract(
        np.invert(cond1), hsvimg1)
    hsvimg2_r1, hsvimg2_r2 = np.extract(cond2, hsvimg2), np.extract(
        np.invert(cond2), hsvimg2)

    traindata[i, :3], traindata[-i - 1, :3] = np.mean(
示例#60
0
def categorical_crossentropy(Y_hat, Y):
    return np.mean(-np.log(np.extract(Y, Y_hat)))
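How np.extract makes this one-liner work, with a small assumed one-hot example: the condition Y picks out the predicted probability of the true class in every row, and the loss is the mean negative log of those values.

import numpy as np

Y = np.array([[0, 1, 0],
              [1, 0, 0]])                  # one-hot labels
Y_hat = np.array([[0.1, 0.8, 0.1],
                  [0.7, 0.2, 0.1]])        # predicted probabilities
print(categorical_crossentropy(Y_hat, Y))  # ~0.290 = mean(-log 0.8, -log 0.7)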