Example #1
def build_rnn_dataset(steps):
    rnntrain,rnntest=makernnset(steps)
    print "trainset.shape, testset.shape =", rnntrain.shape, rnntest.shape
    X_train, y_train = np.dsplit(rnntrain,[5])
    X_valid, y_valid = np.dsplit(rnntest,[5])
                                           
    X_train = normalize(X_train)
    X_valid = normalize(X_valid)
    print 'X_train.shape, y_train.shape =', X_train.shape, y_train.shape
    return X_train, y_train, X_valid, y_valid
Example #2
def generate_rgb_array(filename):
	imageframes = []

	if len(filename) == 1:
		filename = filename[0]



	# reading tif image into memory and turning into numpy array
	im = Image.open(filename)
	im.load()
	imarray = np.array(im)
	print np.unique(np.dsplit(imarray,3)[0]),np.unique(np.dsplit(imarray,3)[1]),np.unique(np.dsplit(imarray,3)[2])

	return imarray
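A minimal equivalent sketch (not from the original project) that performs the depth split once instead of three times, assuming imarray holds a three-channel image:

channels = np.dsplit(imarray, 3)           # three (H, W, 1) arrays, one per channel
print([np.unique(c) for c in channels])    # unique values of each channel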
Example #3
def IMT_find_col(imin, col):
    # This is the color distance from the reference point
    coldist = imin.colorDistance( col )
    lolout = Image( npboost(  np.squeeze(np.dsplit( coldist.getNumpy(), 3)[0] )  ) )
    modim = lolout.stretch(0,20)
    openim = m_open(modim,1).binarize()
    return IMT_calccentroid(openim)
Example #4
def surf2CV(surf, cvImage):
	"""
	Given a Pygame surface, convert to an OpenCv cvArray format.
	Either Ipl image or cvMat.
	surf2CV( pygame.Surface src, cv.Image dest )
	(From http://facial-expression-recognition.googlecode.com/svn/trunk/code/conversion.py)
	( Extracted 2012-Jul-16 22:37EDT by GKF)
	"""
	from numpy import dsplit, dstack
	cv.Set(cvImage, (0,0,0))
	arr = pygame.surfarray.pixels3d(surf).transpose(1,0,2) # Reshape to 320x240
	r,g,b = dsplit(arr,3)
	arr = dstack((b,g,r))
	dtype2depth = {
	'uint8': cv.IPL_DEPTH_8U,
	'int8': cv.IPL_DEPTH_8S,
	'uint16': cv.IPL_DEPTH_16U,
	'int16': cv.IPL_DEPTH_16S,
	'int32': cv.IPL_DEPTH_32S,
	'float32': cv.IPL_DEPTH_32F,
	'float64': cv.IPL_DEPTH_64F,
	}
	try:
		nChannels = arr.shape[2]
	except:
		nChannels = 3
	try:
		cv.SetData(cvImage, arr.tostring(),arr.dtype.itemsize*nChannels*arr.shape[1])
	except:
		print "Error is: ",
		print sys.exc_info()[0]
Example #5
def multiply_3x3_mat(src, mat):
    """RGBの各ピクセルに対して3x3の行列演算を行う"""

    # Determine the coefficient used for normalization
    normalize_val = (2 ** (8 * src.itemsize)) - 1

    # Normalize to 0 .. 1 and split into R, G, B
    b, g, r = np.dsplit(src / normalize_val, 3)

    # Matrix computation
    ret_r = r * mat[0][0] + g * mat[0][1] + b * mat[0][2]
    ret_g = r * mat[1][0] + g * mat[1][1] + b * mat[1][2]
    ret_b = r * mat[2][0] + g * mat[2][1] + b * mat[2][2]

    # Check for overflow (actually unnecessary, since the matrix coefficients are tuned)
    ret_r = cv2.min(ret_r, 1.0)
    ret_g = cv2.min(ret_g, 1.0)
    ret_b = cv2.min(ret_b, 1.0)

    # Check for underflow (actually unnecessary, since the matrix coefficients are tuned)
    ret_r = cv2.max(ret_r, 0.0)
    ret_g = cv2.max(ret_g, 0.0)
    ret_b = cv2.max(ret_b, 0.0)

    # Recombine R, G, B
    ret_mat = np.dstack( (ret_b, ret_g, ret_r) )

    # Scale back to 0 .. 255
    ret_mat *= normalize_val

    return np.uint8(ret_mat)
Example #6
def fun_add_channels(img, weights):
    """
    A functional implementation of the same function
    """
    return (reduce(lambda a, b: a+b,
                   [weight * channel for weight, channel in list(
                    zip(weights, np.dsplit(img, img.shape[2])))]))[:, :, 0]
Example #7
def conditional_maximum_correlation_pmf(pmf):
    """
    Compute the conditional maximum correlation from a 3-dimensional
    pmf. The maximum correlation is computed between the first two dimensions
    given the third.

    Parameters
    ----------
    pmf : np.ndarray
        The probability distribution.

    Returns
    -------
    rho_max : float
        The conditional maximum correlation.
    """
    pXYgZ = pmf / pmf.sum(axis=(0,1), keepdims=True)
    pXgZ = pXYgZ.sum(axis=1, keepdims=True)
    pYgZ = pXYgZ.sum(axis=0, keepdims=True)
    Q = np.where(pmf, pXYgZ / (np.sqrt(pXgZ)*np.sqrt(pYgZ)), 0)
    Q[np.isnan(Q)] = 0

    rho_max = max([svdvals(np.squeeze(m))[1] for m in np.dsplit(Q, Q.shape[2])])

    return rho_max
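A minimal usage sketch (hypothetical pmf; assumes numpy as np and scipy.linalg.svdvals are imported, as the function above requires):

pmf = np.full((2, 2, 2), 1.0 / 8)                   # X, Y, Z independent and uniform
print(conditional_maximum_correlation_pmf(pmf))     # ~0.0: X and Y are uncorrelated given Z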
Example #8
def cat_trials(x):
    """ Concatenate trials along time axis.

    Parameters
    ----------
    x : array_like
        Segmented input data of shape [`n`,`m`,`t`], with `n` time samples, `m` signals, and `t` trials.

    Returns
    -------
    out : ndarray
        Trials are concatenated along the first (time) axis. Shape of the output is [`n`*`t`, `m`].

    See also
    --------
    cut_segments : Cut segments from continuous data

    Examples
    --------
    >>> x = np.random.randn(150, 4, 6)
    >>> y = cat_trials(x)
    >>> y.shape
    (900, 4)
    """
    x = np.atleast_3d(x)
    t = x.shape[2]
    return np.squeeze(np.vstack(np.dsplit(x, t)), axis=2)
Example #9
def main():
	# Construct an array
	a = arange(9).reshape(3,3)
	print a
	# [[0 1 2]
	#  [3 4 5]
	#  [6 7 8]]


	# Horizontal split
	hs = hsplit(a,3)
	print hs
	# [
	# 	array([[0],[3],[6]]), 
	# 	array([[1],[4],[7]]), 
	# 	array([[2], [5], [8]])
	# ]


	# Vertical split
	vs = vsplit(a,3)
	print vs
	# [
	# 	array([[0, 1, 2]]), 
	# 	array([[3, 4, 5]]), 
	# 	array([[6, 7, 8]])
	# ]


	# Depth split; the array must be three-dimensional
	b = arange(27).reshape(3,3,3)
	ds = dsplit(b,3)
	print ds
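	# Expected output: three sub-arrays of shape (3, 3, 1)
	# [
	# 	array([[[ 0],[ 3],[ 6]], [[ 9],[12],[15]], [[18],[21],[24]]]),
	# 	array([[[ 1],[ 4],[ 7]], [[10],[13],[16]], [[19],[22],[25]]]),
	# 	array([[[ 2],[ 5],[ 8]], [[11],[14],[17]], [[20],[23],[26]]])
	# ]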
Example #10
def median_filter(image, selem=None):
    if selem is None:
        # default mask is 5x5 square
        selem = square(5)
    depth = image.shape[2]
    return np.dstack(median(channel[...,0], selem)
                     for channel in np.dsplit(image, depth)) / 255.
Example #11
def colors_peripheral_vs_central(image_roi, attrs={}, debug=False):
    image_roi, center = pad_for_rotation(image_roi)
    lesion_mask = image_roi[..., 3]

    goal = lesion_mask.sum() * 0.7
    inner = lesion_mask.copy()
    while inner.sum() > goal:
        inner = binary_erosion(inner, disk(1))
    outer = np.logical_and(lesion_mask, np.logical_not(inner))

    if debug:
        print """\
=== Colors Peripheral vs Central ===
lesion area: %d
inner goal: %d
inner area: %d
outer area: %d
""" % (lesion_mask.sum(), goal, inner.sum(), outer.sum())

    if debug:
        plt.subplot(131)
        plt.imshow(lesion_mask)
        plt.subplot(132)
        plt.imshow(inner)
        plt.subplot(133)
        plt.imshow(outer)
        plt.show()

    outer = np.nonzero(outer)
    inner = np.nonzero(inner)

    image_lab = rgb2lab(image_roi[..., :3])
    L, a, b = np.dsplit(image_lab, 3)

    delta_L = np.mean(L[outer]) - np.mean(L[inner])
    delta_a = np.mean(a[outer]) - np.mean(a[inner])
    delta_b = np.mean(b[outer]) - np.mean(b[inner])

    density_L = (
        np.histogram(L[outer], 100, (0.,100.), density=True)[0] *
        np.histogram(L[inner], 100, (0.,100.), density=True)[0]
    ).sum()
    density_a = (
        np.histogram(a[outer], 254, (-127.,127.), density=True)[0] *
        np.histogram(a[inner], 254, (-127.,127.), density=True)[0]
    ).sum()
    density_b = (
        np.histogram(b[outer], 254, (-127.,127.), density=True)[0] *
        np.histogram(b[inner], 254, (-127.,127.), density=True)[0]
    ).sum()

    attrs.update([
        ('Colors PvsC mean difference L', delta_L),
        ('Colors PvsC mean difference a', delta_a),
        ('Colors PvsC mean difference b', delta_b),
        ('Colors PvsC density baysian L', density_L),
        ('Colors PvsC density baysian a', density_a),
        ('Colors PvsC density baysian b', density_b),
    ])
Example #12
def _swaplch(LCH):
    "Reverse the order of an LCH numpy dstack or tuple for analysis."
    try:  # Numpy array
        L, C, H = np.dsplit(LCH, 3)
        return np.dstack((H, C, L))
    except:  # Tuple
        L, C, H = LCH
        return H, C, L
Example #13
def alpha_blend(image, background):
    "Une dos imagenes con opacidad, usando numpy"

    image, alpha = np.dsplit(image, np.array([3]))
    image = image
    alpha = 1 - alpha 
    resultado = image * alpha + background * (1 - alpha)
    return resultado
Example #14
def montage(vol, ncols=None):
    """Returns a 2d image monage given a 3d volume."""
    ncols = ncols if ncols else int(np.ceil(np.sqrt(vol.shape[2])))
    rows = np.array_split(vol, range(ncols,vol.shape[2],ncols), axis=2)
    # ensure the last row is the same size as the others
    rows[-1] = np.dstack((rows[-1], np.zeros(rows[-1].shape[0:2] + (rows[0].shape[2]-rows[-1].shape[2],))))
    im = np.vstack([np.squeeze(np.hstack(np.dsplit(row, ncols))) for row in rows])
    return(im)
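A minimal usage sketch (hypothetical volume) illustrating the tiling:

vol = np.random.rand(8, 8, 5)    # five 8x8 slices
im = montage(vol)                # ncols = ceil(sqrt(5)) = 3, so two rows of tiles
print(im.shape)                  # (16, 24); the missing sixth tile is zero-padded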
Example #15
def rgb_to_hsv(img):
    h,w,d = img.shape
    r, g, b = np.dsplit(img,3)
    maxc = img.max(axis=2).reshape(h,w,1)
    minc = img.min(axis=2).reshape(h,w,1)
    s = (maxc-minc) / maxc
    v = maxc
    imgc = (maxc-img)/(maxc-minc)
    rc, gc, bc = np.dsplit(imgc,3)
    h = np.where(maxc==r, bc-gc,
        np.where(maxc==g, 2.0+rc-bc,
                             4.0+gc-rc))
    h = (h/6.0) % 1.0
    hsv = np.dstack([h,s,v])
    v0 = np.dstack([np.zeros_like(h),np.zeros_like(h),v])
    mask = minc == maxc
    mask = np.dstack([mask,mask,mask])
    return np.where(mask, v0, hsv)
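A minimal usage sketch (hypothetical float image with values in 0..1); for such inputs the result should match colorsys.rgb_to_hsv applied per pixel:

img = np.random.rand(4, 4, 3)
hsv = rgb_to_hsv(img)
print(hsv.shape)    # (4, 4, 3): stacked h, s, v planes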
Example #16
def shift_image(im, shift):
    delta_y = shift[0]
    delta_x = shift[1]
    imOut = np.zeros(im.shape)
    for i, c in enumerate(np.dsplit(im, 3)):
        c = c[:, :, 0]
        Y = np.arange(c.shape[0])
        X = np.arange(c.shape[1])
        f = interp2d(X + delta_x, Y + delta_y, c)
        imOut[:, :, i] = f(X, Y)
    return imOut
Example #17
    def write(cls, metadata, imagedata, outbase, voxel_order='LPS'):
        """
        Create png files for each image in a list of pixel data.

        Parameters
        ----------
        metadata : object
            fully loaded instance of a NIMSReader.
        imagedata : dict
            dictionary of np.ndarrays. label suffix as keys, with np.ndarrays as values.
        outbase : str
            output name prefix.
        voxel_order : str [default 'LPS']
            three character string indicating the voxel order, ex. 'LPS'.

        Returns
        -------
        results : list
            list of files written.

        Raises
        ------
        NIMSDataError
            metadata or data is None.

        """
        super(NIMSPNG, cls).write(metadata, imagedata, outbase, voxel_order)  # XXX FAIL! unexpected imagedata = None
        results = []
        for data_label, data in imagedata.iteritems():
            if data is None:
                continue
            if voxel_order and metadata.qto_xyz:  # cannot reorder if no affine
                data, qto_xyz = cls.reorder_voxels(data, metadata.qto_xyz, voxel_order)
            else:
                qto_xyz = metadata.qto_xyz
            outname = outbase + data_label
            data = np.dsplit(data, len(metadata._dcm_list))  # cut the darray
            data = [image.squeeze() for image in data]  # squeeze; remove axis with 1 val
            for i, data in enumerate(data):
                filepath = outname + '_%d' % (i + 1) + '.png'
                if data.ndim == 2:
                    data = data.astype(np.int32)
                    data = data.clip(0, (data * (data != (2**15 - 1))).max())  # -32768->0; 32767->brain.max
                    data = data * (2**8 - 1) / data.max()  # scale to full 8-bit range
                    Image.fromarray(data.astype(np.uint8), 'L').save(filepath, optimize=True)
                elif data.ndim == 3:
                    data = data.reshape((data.shape[1], data.shape[2], data.shape[0]))
                    Image.fromarray(data, 'RGB').save(filepath, optimize=True)
                log.debug('generated %s' % os.path.basename(filepath))
                results.append(filepath)
            log.debug('returning:  %s' % filepath)
        return results
Example #18
def IMT_calccentroid(imin, *args):
    immod = np.squeeze(np.dsplit( imin.getNumpy().astype(float), 3)[0] )/255

    xvals = np.arange(0,imin.width)
    xstack = np.transpose( np.tile( xvals, (imin.height,1) ) )
    xmoments = np.multiply(xstack, immod).flatten()
    xbar = np.sum(xmoments)/np.sum(immod)

    yvals = np.arange(0,imin.height)
    ystack = np.tile( yvals, (imin.width,1) )
    ymoments = np.multiply(ystack, immod).flatten()
    ybar = np.sum(ymoments)/np.sum(immod)
    return [xbar,ybar]
Example #19
def dir_tournant(x, y, a=0, b=0):
    """ Generates a field rotating to the left
            and centred at (a, b)"""
    x, y = translate(x, y, a, b)
    if type(x) in [int, float, np.float64]:
        return normalize([-y, x])
    # vect = np.array()
    z = np.dstack((-y, x))
    U, V = np.dsplit(np.apply_along_axis(normalize, axis=2, arr=z), 2)
    U = np.apply_along_axis(lambda x: x[0], axis=2, arr=U)
    V = np.apply_along_axis(lambda x: x[0], axis=2, arr=V)
    # print vect, '-' * 100
    return np.array([U, V])
Example #20
def lab_to_xyz(LAB, wp):

    L, a, b = np.dsplit(LAB, 3)
    fy = (L + 16) / 116.0
    fz = fy - b / 200.0
    fx = a / 500.0 + fy

    def finv(y):
        y = copy.copy(y)  # CEBALERT: why copy?
        eps3 = EPS ** 3
        return np.where(y > eps3, np.power(y, 3), (116 * y - 16) / KAP)

    xr, yr, zr = finv(fx), finv(fy), finv(fz)
    return np.dstack((xr * wp[0], yr * wp[1], zr * wp[2]))
Example #21
    def getGridSize(self, dataSet):
        # create a super-participant, with all fixations from all participants in this dataset
        superParticipant = data.Participant(dataSet.participantList[0].fixationList, None, None)
        for p in dataSet.participantList[1:]:
            superParticipant.fixationList.extend(p.fixationList)

        # now get all fixations as pixel grid
        pix = superParticipant.generatePixelData(self.params['errorRadius'])

        # now do analysis on this grid
        count_array = numpy.dsplit(pix,2)[0]
        gridSep = grids.calculateGridSize(count_array, 3, 100, 5, 1.7)
        print "Grid width should be " + str(screen.width/gridSep) + " boxes"
        return gridSep, (screen.width/gridSep)
Example #22
 def doResample(mapping, pxPerDeg, arcsecPerPx, containsPole):
     # trigger calculation of properties so that they are not included in the timing measurements
     mapping.lats
     mapping.latsCenter
     mapping.elevation
     mapping.img
     t0 = time.time()
     
     if containsPole is None:
         containsPole = mapping.containsPole
         
     if arcsecPerPx:
         pxPerDeg = plateCarreeResolution(mapping.boundingBox, arcsecPerPx)
     else:
         try:
             _, _ = pxPerDeg
         except TypeError:
             assert pxPerDeg is not None
             pxPerDeg = (pxPerDeg, pxPerDeg)
     print('pxPerDeg: ' + str(pxPerDeg))
     
     imgIsInt = mapping.img.dtype in [np.uint8, np.uint16, np.uint32, np.uint64, np.int8, np.int16, np.int32, np.int64]
     
     # merge elevation with rgb array and extract channels afterwards
     merged = np.dstack((mapping.img.astype(np.float64).filled(np.nan),
                         mapping.elevation.filled(np.nan)))
     lats, lons, latsCenter, lonsCenter, merged = \
         _resample(mapping.latsCenter.filled(np.nan), mapping.lonsCenter.filled(np.nan), mapping.altitude,
                   merged,
                   lambda: mapping.outline, mapping.boundingBox,
                   pxPerDeg, mapping.containsDiscontinuity, containsPole,
                   method=method)
             
     img, elevation = np.dsplit(merged, [-1])
     if imgIsInt:
         with np.errstate(invalid='ignore'):
             img = np.round(img)
     img = np.require(ma.masked_invalid(img, copy=False), mapping.img.dtype)
     if mapping.img.ndim == 2:
         img = img.reshape(img.shape[0], img.shape[1])            
     elevation = elevation.reshape(elevation.shape[0],elevation.shape[1])
     elevation = ma.masked_invalid(elevation, copy=False)
 
     resampledMapping = mapping.createResampled(lats, lons, latsCenter, lonsCenter, elevation, img)
     
     print('resampling:', time.time()-t0, 's')
     
     return resampledMapping
Example #23
def assign_frame(lhs, rhs_planes, **props):
    """Assigns data to VapourSynth video frame."""

    split = lambda x: [x] if len(x.shape) == 2 \
        else np.squeeze(np.dsplit(x, x.shape[2]))

    if type(rhs_planes) is np.ndarray:
        rhs_planes = [rhs_planes]

    lhs.props.update(props)

    rhs_planes = [x for l in rhs_planes for x in split(l)]

    for i, f in enumerate(rhs_planes):
        np.asarray(lhs.get_write_array(i)).__iadd__(f)

    return lhs  # Not conventional in Py, but useful
Example #24
def get_bands(image):
    """ Pull out bands from image.
    (We assume bands correspond to the order orientation, coherence, energy)"""
    
    data = np.array(image)

    data /= 255 #each of the values are on 8-bit scales. (They don't all necessarily reach 255)
    
    hsv = colors.rgb_to_hsv(data)
    #bands = np.dsplit(hsv)
    bands = []
    
    for band in np.dsplit(hsv, hsv.shape[-1]):
        #bands.append(np.squeeze(band)) #have the 1-element arrays be scalars
        bands.append(band)
        
    return bands
Example #25
def genbasepal():
    "Generate base palette for highpalookup system. \
All other palettes must be based on this one."
    imint = zeros([DIM, DIM, DIM], 'uint32');
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                imint[i,j,k] = ((i<<16)|(k<<8)|j)<<RESIDBITS;

    imint = hstack(dsplit(imint, DIM));

    imbyte = zeros([DIM, DIM*DIM, 3], 'uint8');

    for i in range(3):
        imbyte[:,:,i] = (imint[:,:,0]>>(i*8))&255;

    return imbyte;
Example #26
def rgb_to_XYZ(img, mat=None):
    """RGBをXYZに変換する。mat が None の場合は cvtColor で XYZ変換する。
       その場合、色域は Rec.709、色温度は D65 に固定となる。"""
    if mat is not None:
        b, g, r = np.dsplit(img, 3)

        # Matrix computation
        ret_X = r * mat[0][0] + g * mat[0][1] + b * mat[0][2]
        ret_Y = r * mat[1][0] + g * mat[1][1] + b * mat[1][2]
        ret_Z = r * mat[2][0] + g * mat[2][1] + b * mat[2][2]

        # Recombine into XYZ
        ret_img = np.dstack( (ret_X, ret_Y, ret_Z) )
        
    else:
        ret_img = cv2.cvtColor(img, cv2.COLOR_BGR2XYZ)

    return ret_img
Example #27
def rgb_to_xy(img, mat=None):
    """RGB から xy色度を算出。戻り値は x, y の配列"""

    # To avoid division by zero, change (0,0,0) pixels to (1,1,1)
    zero_px = (img[:,:,0] == 0) & (img[:,:,1] == 0) & (img[:,:,2] == 0) 
    zero_cancel_filter = np.dstack((np.dstack((zero_px, zero_px)), zero_px))
    img = img + zero_cancel_filter
 
    # Normalize
    normalize_val = (2 ** (8 * img.itemsize)) - 1
    img = np.float32(img / normalize_val)
    
    img_XYZ = rgb_to_XYZ(img, mat)
    X, Y, Z = np.dsplit(img_XYZ, 3)
    x = X / (X + Y + Z)
    y = Y / (X + Y + Z)

    return x.flatten(), y.flatten()
Example #28
def xyz_to_lab(XYZ,wp):

    X,Y,Z = numpy.dsplit(XYZ,3)
    xn,yn,zn = X/wp[0], Y/wp[1], Z/wp[2]

    def f(t):
        t = t.copy() # probably unnecessary!
        t_eps = t>EPS
        t_not_eps = t<=EPS
        t[t_eps] = numpy.power(t[t_eps], 1.0/3)
        t[t_not_eps] = (KAP*t[t_not_eps]+16.0)/116.
        return t

    fx,fy,fz = f(xn), f(yn), f(zn)
    L = 116*fy - 16
    a = 500*(fx - fy)
    b = 200*(fy - fz)

    return numpy.dstack((L,a,b))
Example #29
def hsv_to_rgb(hsv):
    h, s, v = np.dsplit(hsv,3)
    i = np.floor(h*6.0)
    f = (h*6.0) - i
    p = v*(1.0 - s)
    q = v*(1.0 - s*f)
    t = v*(1.0 - s*(1.0-f))
    i = np.dstack([i%6,i%6,i%6])
    s = np.dstack([s,s,s])
    where = np.where
    return (
        where(s == 0.0, np.dstack([v, v, v]), 
        where(i == 0,   np.dstack([v, t, p]), 
        where(i == 1,   np.dstack([q, v, p]), 
        where(i == 2,   np.dstack([p, v, t]), 
        where(i == 3,   np.dstack([p, q, v]), 
        where(i == 4,   np.dstack([t, p, v]), 
                        np.dstack([v, p, q]),
        )))))))
Example #30
def view_bands( img, **kwargs ):
    """
    Plot each band as a separate single band subplot.

    Parameters
    ----------
    img : numpy.array
        This is the image array of shape (Rows,Cols,Bands)
    **kwargs : keyword arguments
        This is how the keyword arguments get passed in. I'll list them below.
    ncols : int
        The number of columns in the plot. (default value = 2)
    figwidth : int or float
        The width of figure. The height of the figure will be determined by the
        number of bands in the image. (Default value = 14)
    cmap : matplotlib.colors.Colormap, optional, default: None
        The colormap to be used by ``imshow`` to display the bands.
    subset : numpy.s_
        A numpy slice object used to display a subset of the image rather than
        the whole thing.


    Returns
    -------
    Nothing
        This method displays a plot. Exactly how the plot is displayed is
        determined by your matplotlib settings.
    """
    nbands = img.shape[-1]
    ncols = kwargs.pop('ncols',2)
    figwidth = kwargs.pop('figwidth',14)
    cmap = kwargs.pop('cmap',None)
    nrows = int( ceil( nbands/2.0 ))
    subset = kwargs.pop('subset',np.s_[:,:,:])
    fig, axarr = subplots(nrows, ncols, sharex=True, sharey=True,
                            figsize=(figwidth, (figwidth/3.5)*nrows))
    for i, barr in enumerate( np.dsplit(img, nbands) ):
        ax = axarr.ravel()[i]
        axtit = "Band %i" % (i + 1)
        ax.set_title(axtit)
        ax.imshow( barr[subset].squeeze(), cmap=cmap )
        ax.axis('off')
Example #31
 def _channelSplit(self, image):
     return np.dsplit(image, image.shape[-1])
Example #32
print oned,toned
print np.column_stack((oned,toned))# column-stack 1-D arrays
print np.column_stack((a,b)) # column-stack 2-D arrays (same as horizontal stacking)
print np.row_stack((oned,toned))# row-stack 1-D arrays
print np.row_stack((a,b))# row-stack 2-D arrays (same as vertical stacking)
# Splitting arrays
print("=========Splitting arrays=========")
s=np.arange(9).reshape(3,3)
print(a)
print(np.hsplit(a,3))# vertical split
print(np.split(a,3,axis=1))# vertical split
print(np.vsplit(a,3))# horizontal split
print(np.split(a,3,axis=0))# horizontal split
c = np.arange(27).reshape(3,3,3)
print(c)
print(np.dsplit(c,3))# depth split
print("==========Array attributes==========")
print(c.ndim)
print(c.size)
print(c.itemsize)
print(c.nbytes)
d=np.array([1+1j,3+2j])
print(d)
print(d.real)
print(d.imag)
e = np.arange(4).reshape(2,2)
f = e.flat# flatten e row by row into a one-dimensional iterator
print(e)
print(f)
print(e.flat[1],e.flat[2])# print the 2nd and 3rd elements of the flattened array
print("==================Array conversion==================")
Example #33
# adds a row to a 2d array
np.vstack((row, grid))

# adds a column to a 2d array
np.hstack((grid, column))

# splits array x at the indices 3, 5
np.split(x, [3,5])

# splits a 2d array into upper and lower
upper, lower = np.vsplit( grid, [2])

# splits a 2d array into left and right
left, right = np.hsplit(grid, [2])

# np.dsplit will split 3d arrays on the z axis (see the sketch after this example)
closer, farther = np.dsplit( cube, [2])



#   Trig operations

np.sin(theta)
np.cos(theta)
np.tan(theta)
np.arcsin(x)
np.arccos(x)
np.arctan(x)

np.exp(x)      #e  ^ x
np.exp2(x)     #2  ^ x
np.power(3,x)  #3  ^ x
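A runnable sketch of the dsplit line above (hypothetical cube array, assumes numpy is imported as np):

cube = np.arange(27).reshape(3, 3, 3)
closer, farther = np.dsplit(cube, [2])
print(closer.shape, farther.shape)    # (3, 3, 2) (3, 3, 1)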
Example #34
2. Vertical split: vsplit(array a, num)
         array a: the array to be split
         num: number of equal parts (num must divide the number of rows of a evenly, otherwise an exception is raised)
    Splits the array along the vertical direction into sub-arrays of equal size.
    a is a two-dimensional array, and the resulting sub-arrays are two-dimensional as well.
3. Depth split: b = dsplit(array a, num)
         array a: the array to be split | np.shape(a) == (3,4,6)
         num: number of equal parts (num must divide 6 evenly, otherwise an exception is raised)
    Splits the array along the depth axis into sub-arrays of equal size.
    a here is a three-dimensional array, and the resulting sub-arrays are three-dimensional as well.
    b[0][0] == part 0 of the horizontal split of plane 0 (each plane is split into num parts horizontally)
    b[1][0] == part 1 of the horizontal split of plane 0
    b[0][1] == part 0 of the horizontal split of plane 1
    b[1][1] == part 1 of the horizontal split of plane 1
    !!! In this example the sub-arrays in b are three-dimensional !!!
'''
# 1. Horizontal split
a = np.arange(16).reshape(4, 4)
b = np.hsplit(a, 4)
# 2. Vertical split
c = np.vsplit(a, 2)
# 3. Depth split
d = np.arange(72).reshape(3, 4, 6)
e = np.dsplit(d, 3)
print("Original array a:\n{}\n".format(a))
print("Horizontal split - array b:\n{}\n".format(b))
print("Vertical split - array c:\n{}\n".format(c))
print("Original depth array d:\n{}\n".format(d[1]))
# print("Depth split - array e:\n{}\n".format(e))
print("Depth split - sub-array:\n{}\n".format(e[1][1]))
Example #35
top, bottom = np.vsplit(x2, [1])
print(top, bottom)

top, bottom = np.split(x2, [1], axis=0)
print(top, bottom)

# Second axis

left, right = np.hsplit(x2, [3])
print(left, right)

np.split(x2, [3], axis=1)

# Third axis

np.dsplit(x2.reshape(2, 6, 1), [2])

np.split(x2.reshape(2, 6, 1), [2], axis=2)

# ### Vectorization (ufunc)
#
# NumPy is much faster than standard Python methods.
# This is because of vectorized computation in compiled routines.

# %timeit np.add(5, x1)

# %timeit [x + 5 for x in x1]

# %timeit (5 + x1) # Same as np.add

# Common operations
Example #36
print('--------------------------Array splitting--------------------------')
#(1) Horizontal split: the code below splits the array horizontally into 3 equally sized sub-arrays:
print('Horizontal split')
print(a)
print(np.hsplit(a,3))
# For the same array, call the split function with axis=1 and compare the result:
print(np.split(a,3,axis=1))
#(2) Vertical split: the vsplit function splits the array along the vertical direction:
print('Vertical split')
print(np.vsplit(a,3))
print(np.split(a,3,axis=0))
#(3) Depth split: as expected, the dsplit function splits the array along the depth axis.
print('Depth split')
a=np.arange(24).reshape(2,3,4)
print(a)
print(np.dsplit(a,4))
print('--------------------------Array attributes--------------------------')
print('Array dimensions')
print(a.ndim)
print('Number of array elements')
print(a.size)
print('We can use the tolist function to convert a NumPy array into a Python list.')
print(a.tolist())
print('--------------------------Common functions--------------------------')
#(1) The identity matrix: a square matrix whose main-diagonal elements are all 1 and whose other elements are all 0.
# In NumPy such a two-dimensional array can be created with the eye function;
# we only need one argument, specifying the number of 1 elements in the matrix. For example, create a 2x2 array:
i2=np.eye(2)
print(i2)
# Use the savetxt function to store the data in a file; we need to specify the file name and the array to save.
np.savetxt('eye.txt',i2)
Example #37
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
demo08_stack.py  Stacking and splitting
"""
import numpy as np

a = np.arange(1, 7).reshape(2, 3)
b = np.arange(7, 13).reshape(2, 3)
print(a)
print(b)
# Horizontal operations
c = np.hstack((a, b))
print(c)
a, b = np.hsplit(c, 2)

# Vertical operations
c = np.vstack((a, b))
print(c)
a, b = np.vsplit(c, 2)

# Depth operations
c = np.dstack((a, b))
print(c)
a, b = np.dsplit(c, 2)
print(a)
print(b)
Example #38
print c
#Out:
#array([[[ 0,  1,  2],
#        [ 3,  4,  5],
#        [ 6,  7,  8]],
#
#       [[ 9, 10, 11],
#        [12, 13, 14],
#        [15, 16, 17]],
#
#       [[18, 19, 20],
#        [21, 22, 23],
#        [24, 25, 26]]])

print "In: dsplit(c, 3)"
print np.dsplit(c, 3)
#Out:
#[array([[[ 0],
#        [ 3],
#        [ 6]],
#
#       [[ 9],
#        [12],
#        [15]],
#
#       [[18],
#        [21],
#        [24]]]),
# array([[[ 1],
#        [ 4],
#        [ 7]],
Example #39
#splitting
x = [1, 2, 3, 99, 99, 3, 2, 1]
x1, x2, x3 = np.split(x, [3, 5])
print(x1, x2, x3)

grid = np.arange(16).reshape((4, 4))
grid
upper, lower = np.vsplit(grid, [2])
print(upper)
print(lower)
left, right = np.hsplit(grid, [2])
print(left)
print(right)
x3 = np.random.randint(10, size=(3, 4, 5))
np.dsplit(x3, [2])


#vectorize!
def compute_reciprocals(values):
    output = np.empty(len(values))
    for i in range(len(values)):
        output[i] = 1.0 / values[i]
    return output

values = np.random.randint(1, 10, size=5)
compute_reciprocals(values)

big_array = np.random.randint(1, 100, size=1000000)
%timeit compute_reciprocals(big_array)
Example #40
# Vertical operations
c = np.vstack((a, b))
print(c)
a, b = np.vsplit(c, 2)  # split into 2 parts
print(a, '\n', b)

# Horizontal operations
d = np.hstack((a, b))
print(d)
a, b = np.hsplit(d, 2)
print(a, '\n', b)

# Depth direction
e = np.dstack((a, b))
print(e)
a, b = np.dsplit(e, 2)
print(a, '\n', b)

a = a.reshape(2, 3)
b = b.reshape(2, 3)
print(a, '\n', b)

c = np.concatenate((a, b), axis=1)
print(c)
# Test stacking arrays of different lengths
a = np.arange(1, 7)
b = np.arange(10, 16)
c = np.pad(b, pad_width=(0, 1), mode='constant', constant_values=-1)
print(a)
print(b)
print(c)
Example #41
def quantize_rgb(im_orig, n_quant, n_iter):
    """
    A method for converting a colored picture to a quantize color scheme
    :param im_orig: The original colored picture to be quantize
    :param n_quant: The number of different colors the picture will appear with
    :param n_iter: The number of iterations for finding optimum quantize intensities
    :return: The quantized picture based on input values
    """
    final_error = 0
    if len(im_orig.shape) != 3:
        # error. we assume a colored picture
        return [-1, im_orig]

    [R, G, B] = np.dsplit(im_orig, im_orig.shape[2])
    red_hist = np.histogram(R, 256)[0]
    green_hist = np.histogram(G, 256)[0]
    blue_hist = np.histogram(B, 256)[0]

    # calculating the optimal borders and gravity points for each color
    red_err, red_q, red_z = quantize_helper(red_hist, n_quant, n_iter)
    green_err, green_q, green_z = quantize_helper(green_hist, n_quant, n_iter)
    blue_err, blue_q, blue_z = quantize_helper(blue_hist, n_quant, n_iter)
    # finding the maximal range of any given bucket of any given color
    red_max_range = max_range_finder(red_z)
    green_max_range = max_range_finder(green_z)
    blue_max_range = max_range_finder(blue_z)

    if red_max_range >= green_max_range and red_max_range >= blue_max_range:
        # red has max range
        # assigning new values
        final_red = quantize_apply(R, red_hist, red_q, red_z, n_quant)
        # using the z borders of the color with the highest range, applying to the rest of colors
        green_err_u, green_q_u, green_z_u = quantize_helper(
            green_hist, n_quant, 1, red_z)
        blue_err_u, blue_q_u, blue_z_u = quantize_helper(
            blue_hist, n_quant, 1, red_z)
        # applying the updated values of the other colors to create the final color array for each color
        final_green = quantize_apply(G, green_hist, green_q_u, green_z_u,
                                     n_quant)
        final_blue = quantize_apply(B, blue_hist, blue_q_u, blue_z_u, n_quant)

        final_error = red_err + green_err_u + blue_err_u
    else:
        if green_max_range >= red_max_range and green_max_range >= blue_max_range:
            final_green = quantize_apply(G, green_hist, green_q, green_z,
                                         n_quant)
            red_err_u, red_q_u, red_z_u = quantize_helper(
                red_hist, n_quant, 1, green_z)
            blue_err_u, blue_q_u, blue_z_u = quantize_helper(
                blue_hist, n_quant, 1, green_z)
            final_red = quantize_apply(R, red_hist, red_q_u, red_z_u, n_quant)
            final_blue = quantize_apply(B, blue_hist, blue_q_u, blue_z_u,
                                        n_quant)

            final_error = green_err + red_err_u + blue_err_u
        else:
            if blue_max_range >= red_max_range and blue_max_range >= green_max_range:
                final_blue = quantize_apply(B, blue_hist, blue_q, blue_z,
                                            n_quant)
                red_err_u, red_q_u, red_z_u = quantize_helper(
                    red_hist, n_quant, 1, blue_z)
                green_err_u, green_q_u, green_z_u = quantize_helper(
                    green_hist, n_quant, 1, blue_z)
                final_green = quantize_apply(G, green_hist, green_q_u,
                                             green_z_u, n_quant)
                final_red = quantize_apply(R, red_hist, red_q_u, red_z_u,
                                           n_quant)

                final_error = blue_err + green_err_u + red_err_u

    # merging all the colors to a new equalized picture
    eq_pic = np.concatenate((final_red, final_green, final_blue), axis=2)

    return [eq_pic, final_error]
Example #42
print(c)
#Out:
#array([[[ 0,  1,  2],
#        [ 3,  4,  5],
#        [ 6,  7,  8]],
#
#       [[ 9, 10, 11],
#        [12, 13, 14],
#        [15, 16, 17]],
#
#       [[18, 19, 20],
#        [21, 22, 23],
#        [24, 25, 26]]])

print("In: dsplit(c, 3)")
print(np.dsplit(c, 3))
#Out:
#[array([[[ 0],
#        [ 3],
#        [ 6]],
#
#       [[ 9],
#        [12],
#        [15]],
#
#       [[18],
#        [21],
#        [24]]]),
# array([[[ 1],
#        [ 4],
#        [ 7]],
Example #43
np.split(
    matriz, [pos1, pos2, pos3], dimensión
)  # Split an array into sub-arrays along the specified axis, at the given split positions
np.hsplit(
    matriz, num_divisiones
)  # Split an array into sub-arrays along axis 1, into the given number of divisions (raises an error if the division is not exact)
np.hsplit(
    matriz, [pos1, pos2, pos3]
)  # Split an array into sub-arrays along axis 1, at the given split positions
np.vsplit(
    matriz, num_divisiones
)  # Split an array into sub-arrays along axis 0, into the given number of divisions (raises an error if the division is not exact)
np.vsplit(
    matriz, [pos1, pos2, pos3]
)  # Split an array into sub-arrays along axis 0, at the given split positions
np.dsplit(
    matriz, num_divisiones
)  # Split an array into sub-arrays along axis 2, into the given number of divisions (raises an error if the division is not exact)
np.dsplit(
    matriz, [pos1, pos2, pos3]
)  # Split an array into sub-arrays along axis 2, at the given split positions

# Arrays and vectors by repetition (tiling)
np.tile(
    matriz, (ver_rep, hor_rep)
)  # Repeat the array along the vertical and horizontal axes the specified number of times
np.repeat(
    matriz, [rep1, rep2, repN], dimensión
)  # Repeat the elements of an array the specified number of times along the specified axis
np.repeat(
    matriz, repeticiones, dimensión
)  # Collapse the array to one dimension by repeating the elements the specified number of times
Example #44
    def __call__(self, color, channel=None):
        """
        Calculates adjusted color value. Allows getting corrected values by
        making calls to instance.

        :param color: List of RGB values, scaled from -1 to 1, or color
         from a single channel.
        :param int channel: If color is passed as a single number, channel is
         the color channel.
        :return: Adjusted list of RGB values, or single adjusted color.
        """
        channel = None if channel == 3 else channel

        if channel is None:
            # if entire texture
            if len(numpy.shape(color)) == 3:
                has_alpha = bool(numpy.shape(color)[2] == 4)

                adj_color = numpy.copy(color)

                size_x = adj_color.shape[0]
                size_y = adj_color.shape[1]

                if has_alpha:
                    r, g, b, a = numpy.dsplit(adj_color, 4)
                else:
                    r, g, b = numpy.dsplit(adj_color, 3)

                r = r.flatten()
                g = g.flatten()
                b = b.flatten()
                # print 'red correcting.....',
                r = self.r_correct(r)
                # print 'done'
                # print 'green correcting...',
                g = self.g_correct(g)
                # print 'done'
                # print 'blue correcting....',
                b = self.b_correct(b)
                # print 'done\n'
                '''
                q = Queue()
                r_proc = Process(target=self.r_correct, args=(r, q))
                r_proc.start()
                g_proc = Process(target=self.g_correct, args=(g, q))
                g_proc.start()
                b_proc = Process(target=self.b_correct, args=(b, q))
                b_proc.start()

                r = q.get()
                g = q.get()
                b = q.get()

                r_proc.join()
                g_proc.join()
                b_proc.join()
                '''

                r = r.reshape(size_x, size_y, 1)
                g = g.reshape(size_x, size_y, 1)
                b = b.reshape(size_x, size_y, 1)

                if has_alpha:
                    adj_color = numpy.dstack([r, g, b, a])
                else:
                    adj_color = numpy.dstack([r, g, b])

            # if single color
            elif len(numpy.shape(color)) == 1:
                # ignore alpha
                r = color[0]
                g = color[1]
                b = color[2]

                # print 'red correcting.....',
                r_adj = self.r_correct(r)
                # print 'done'
                # print 'green correcting...',
                g_adj = self.g_correct(g)
                # print 'done'
                # print 'blue correcting....',
                b_adj = self.b_correct(b)
                # print 'done\n'

                adj_color = color[:]

                adj_color[0] = r_adj
                adj_color[1] = g_adj
                adj_color[2] = b_adj

                # add ceiling/floor
                for i in range(len(color)):
                    if adj_color[i] >= 1:
                        adj_color[i] = 1.0
                    elif adj_color[i] <= -1:
                        adj_color[i] = -1.0

            elif len(numpy.shape(color)) == 2:

                # if grayscale image
                if numpy.shape(color)[1] != 3:
                    print(
                        '\nWARNING: Cannot gamma correct grayscale .iml images.'
                    )
                    adj_color = color

                # if noise checkerboard
                elif numpy.shape(color)[1] == 3:
                    r, g, b = numpy.hsplit(color, 3)

                    r = r.flatten()
                    g = g.flatten()
                    b = b.flatten()

                    # print 'red correcting.....',
                    r = self.r_correct(r)
                    # print 'done'
                    # print 'green correcting...',
                    g = self.g_correct(g)
                    # print 'done'
                    # print 'blue correcting....',
                    b = self.b_correct(b)
                    # print 'done\n'

                    adj_color = numpy.dstack([r, g, b])[0]

        # if single channel
        elif channel is not None:
            if channel == 0:
                # print 'red correcting.....',
                adj = self.r_correct(color)
                # print 'done'
            if channel == 1:
                # print 'green correcting...',
                adj = self.g_correct(color)
                # print 'done'
            if channel == 2:
                # print 'blue correcting....',
                adj = self.b_correct(color)
                # print 'done'

            adj_color = adj

            # add ceiling/floor
            if adj_color >= 1:
                adj_color = 1.0
            elif adj_color <= -1:
                adj_color = -1.0

        return adj_color
Example #45
import numpy as np

matrix_x = [[[2, 4, 6, 8], [10, 12, 14, 16], [18, 20, 22, 24],
             [26, 28, 30, 32]]]
matrix_x = np.array(matrix_x)

matrix_x = np.dsplit(matrix_x, 2)
print(matrix_x)

matrix_x = [[[1, 2, 1], [3, 4, 1], [1, 1, 1]]]
matrix_x = np.array(matrix_x)
matrix = np.dsplit(matrix_x, [1, 2])
print(matrix)

matrix_dois = np.dsplit(matrix_x, [2, 4])
print(matrix_dois)

matrix_tres = np.dsplit(matrix_x, [2, 4, 5])
print(matrix_tres)

matrix_quatro = np.dsplit(matrix_x, [2, 4, 5])
print(matrix_quatro)
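# For reference: with matrix_x.shape == (1, 3, 3), split indices past the end of the
# depth axis (4 and 5 above) simply yield empty (1, 3, 0) sub-arrays rather than an error.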
Example #46
def split_image_rgb(image):
    # Channel order: R, G, B
    return np.dsplit(image, image.shape[-1])
Example #47
np.row_stack((a, b))
np.row_stack((a, b)) == np.vstack((a, b))

# splitting arrays
a = np.arange(9).reshape(3, 3)
print(a)

np.hsplit(a, 3)  # split horizontally
np.split(a, 3, axis=1)

np.vsplit(a, 3)  # split vertically
np.split(a, 3, axis=0)

c = np.arange(27).reshape(3, 3, 3)
print(c)
np.dsplit(c, 3)  # split along depth (the array rank is 3)

# numpy array attributes
b = np.arange(24).reshape(2, 12)
print(b)

print(b.ndim)  # number of dimensions of the array
print(b.size)  # number of elements
print(b.itemsize)  # bytes per array element
print(b.nbytes)  # total bytes of all elements
print(b.size * b.itemsize)  # total bytes of all elements

b.resize(6, 4)
print(b)
print(b.T)  # transpose, same as transpose()
Example #48
    def FuseRGBD_optimized(self, Image, Pose, s=1):
        """
        Fuse data of 3D TSDF model with RGBD image CPU optimize
        :param Image: RGBD image to update to its surfaces
        :param Pose: transform from the first camera pose to the last camera pose
        :param s:  subsampling factor
        :return: none
        NOT USED FUNCTIONS
        """
        Transform = Pose  #LA.inv(Pose)

        nu = 0.05

        column_index_ref = np.array([
            np.array(range(self.Size[1])) for _ in range(self.Size[0])
        ])  # x coordinates
        column_index_ref = (column_index_ref - self.c_x) / self.dim_x

        line_index_ref = np.array([
            x * np.ones(self.Size[1], np.int) for x in range(self.Size[0])
        ])  # y coordinates
        line_index_ref = (line_index_ref - self.c_y) / self.dim_y

        voxels2D = np.dstack((line_index_ref, column_index_ref))

        normVtxInput = Image.Vtx[:, :, 0:3] * Image.Vtx[:, :, 0:3]
        distVtxInput = np.sqrt(normVtxInput.sum(axis=2))

        for z in range(self.Size[2] / s):
            curr_z = (z - self.c_z) / self.dim_z
            stack_z = curr_z * np.ones(
                (self.Size[0], self.Size[1], 1), dtype=np.float32)

            stack_pix = np.ones((self.Size[0], self.Size[1]), dtype=np.float32)
            stack_pt = np.ones((self.Size[0], self.Size[1], 1),
                               dtype=np.float32)
            pix = np.zeros(
                (self.Size[0], self.Size[1], 2), dtype=np.float32
            )  # recorded projected location of all voxels in the current slice
            pix = np.dstack((pix, stack_pix))
            pt = np.dstack((voxels2D, stack_z))
            pt = np.dstack(
                (pt,
                 stack_pt))  # record transformed 3D positions of all voxels
            pt = np.dot(Transform, pt.transpose(0, 2, 1)).transpose(1, 2, 0)

            #if (pt[2] != 0.0):
            lpt = np.dsplit(pt, 4)
            lpt[2] = General.in_mat_zero2one(lpt[2])

            # if in 1D pix[0] = pt[0]/pt[2]
            pix[::s, ::s, 0] = (lpt[0] / lpt[2]).reshape(
                (self.Size[0], self.Size[1]))
            # if in 1D pix[1] = pt[1]/pt[2]
            pix[::s, ::s, 1] = (lpt[1] / lpt[2]).reshape(
                (self.Size[0], self.Size[1]))
            pix = np.dot(
                Image.intrinsic,
                pix[0:self.Size[0],
                    0:self.Size[1]].transpose(0, 2, 1)).transpose(1, 2, 0)
            column_index = (np.round(pix[:, :, 0])).astype(int)
            line_index = (np.round(pix[:, :, 1])).astype(int)

            # create matrix that have 0 when the conditions are not verified and 1 otherwise
            cdt_column = (column_index > -1) * (column_index < Image.Size[1])
            cdt_line = (line_index > -1) * (line_index < Image.Size[0])
            line_index = line_index * cdt_line
            column_index = column_index * cdt_column

            empty_mat = (Image.Vtx[:, :, 2] != 0.0)
            #normPt = pt[:,:,0:3]*pt[:,:,0:3]
            #distPt = np.sqrt(normPt.sum(axis=2))
            #diff_Vtx = distPt[:,:] - distVtxInput[line_index[:][:], column_index[:][:]]
            diff_Vtx = pt[:, :, 2] - Image.Vtx[line_index[:][:],
                                               column_index[:][:], 2]
            diff_Vtx = diff_Vtx[:, :] * empty_mat[
                line_index[:][:], column_index[:][:]] - ~empty_mat[
                    line_index[:][:], column_index[:][:]]

            self.TSDF[:, :, z] = diff_Vtx / nu
Example #49
def k_stability_analysis(k, forwardjump):
   w = rw.PVReadWeights(sys.argv[1])
   feature = k - 1
   count = 0
   d = np.zeros((nxp,nyp))

   w.rewind()
   for ko in np.arange(numpat):
      kxOn = conv.kxPos(ko, nx, ny, nf)
      kyOn = conv.kyPos(ko, nx, ny, nf)
      p = w.next_patch()
      if marginstart < kxOn < marginend:
         if marginstart < kyOn < marginend:
            if cluster[count] == feature:
               e = p
               e = e.reshape(nxp, nyp)
               numrows, numcols = e.shape
               count = count + 1
               patpos = w.file.tell()
               patchposition.append(patpos)
            else:
               e = d
               count = count + 1
         else:
            e = d
      else:
         e = d
      x = (nxp) * (ko % nx)
      y = ( nyp) * (ko / nx)

      im2[y:y+nyp, x:x+nxp] = e

   ##########
   # Find Valuse of K-cluster[x] Patches
   ##########


   w = rw.PVReadWeights(sys.argv[3])
   wOff = rw.PVReadWeights(sys.argv[4])
   w.rewind()
   wOff.rewind()
   patpla = patchposition
   lenpat = len(patpla)


   number = w.numPatches 
   count = 0

   exp = []
   expOff = []
   exppn = []
   exppnOff = []

   body = w.recSize + 4
   hs = w.headerSize
   filesize = os.path.getsize(sys.argv[3])
   bint = filesize / body


   bint = bint - forwardjump

   if forwardjump == 0:
      4
   else:
      leap = ((body * forwardjump) + (100 * forwardjump))
      w.file.seek(leap, os.SEEK_CUR)






   for i in range(bint):
      if i == 0:
         for j in range(lenpat):
            if j == 0:
               go = patpla[0] - hs - 20
               w.file.seek(go, os.SEEK_CUR)
               wOff.file.seek(go, os.SEEK_CUR)
               p = w.next_patch()
               pOff = wOff.next_patch()
               if len(p) == 0:
                  print"STOPPEP SUPER  EARLY"
                  sys.exit()
               don = p
               doff = pOff
               

               d = np.append(don, doff)
               fallpat = d


               #p = w.normalize(d)
               #pn = p
               #pn = np.reshape(np.matrix(pn),(1,32))
               #p = np.reshape(np.matrix(p),(32,1))
               #pm = pn * p
               #exppn = np.append(exppn, pn)
               #exp = np.append(exp,pm)
               
            else:
               pospost = patpla[j - 1]
               poscur = patpla[j]
               jump = poscur - pospost - 20
               w.file.seek(jump, os.SEEK_CUR)
               wOff.file.seek(jump, os.SEEK_CUR)
               p = w.next_patch()
               pOff = wOff.next_patch()
               if len(pOff) == 0:
                  print"STOPPED EARLY"
                  sys.exit()
               don = p
               doff = pOff
               d = np.append(don, doff)
               nallpat = d
               fallpat = np.vstack((fallpat, nallpat))


               #p = w.normalize(d)
               #pn = p
               #pn = np.reshape(np.matrix(pn),(1,32))
               #p = np.reshape(np.matrix(p),(32,1))
               #pm = pn * p
               #exppn = np.append(exppn, pn)
               #exp = np.append(exp,pm)
      else:
         count = 0
         prejump = body - patpla[lenpat-1] + hs
         w.file.seek(prejump, os.SEEK_CUR)
         wOff.file.seek(prejump, os.SEEK_CUR)
         for j in range(lenpat):
            if j == 0:
               go = patpla[0] - 4 - 20
               w.file.seek(go, os.SEEK_CUR)
               wOff.file.seek(go, os.SEEK_CUR)
               p = w.next_patch()
               pOff = wOff.next_patch()
               test = p
               if len(test) == 0:
                  print "stop"
                  input('Press Enter to Continue')
                  sys.exit()
               don = p
               doff = pOff
               d = np.append(don, doff)
               nfallpat = d

               #p = w.normalize(d)
               #p = np.reshape(np.matrix(p),(32,1))
               #j1 = 0
               #j2 = 32
               #pm = np.matrix(exppn[j1:j2]) * p
               #exp = np.append(exp,pm)
               #count += 1
            else:
               pospost = patpla[j - 1]
               poscur = patpla[j]
               jump = poscur - pospost - 20
               w.file.seek(jump, os.SEEK_CUR)
               wOff.file.seek(jump, os.SEEK_CUR)
               p = w.next_patch()
               pOff = wOff.next_patch()
               test = pOff
               if len(test) == 0:
                  print "stop"
                  input('Press Enter to Continue')
                  sys.exit()
               don = p
               doff = pOff
               d = np.append(don, doff)
               nfallpat = np.vstack((nfallpat, d))




               #p = w.normalize(d)
               #p = np.reshape(np.matrix(p),(32,1))
               #j1 = 32 * j
               #j2 = 32 * (j +1)
               #pm = np.matrix(exppn[j1:j2]) * p
               #exp = np.append(exp,pm)
               #count += 1  

         fallpat = np.dstack((fallpat, nfallpat))
   #print fallpat
   #print np.shape(fallpat)
   #print bint

   exp = []
   exppn = []
   dsallpat = np.dsplit(fallpat, bint)
   for i in range(bint):
      postds = dsallpat[-(i + 1)]
      sh = np.shape(postds)
      sh = sh[0]
      #print sh
      if i == 0:
         for j in range(sh):
            if j == 0:
               d = postds[j]
               p = w.normalize(d)
               pn = p
               pn = np.reshape(np.matrix(pn), (1,32))
               p = np.reshape(np.matrix(p), (32,1))
               pm = pn * p
               exppn = np.append(exppn, pn)
               exp = np.append(exp, pm)
            else:
               d = postds[j]
               p = w.normalize(d)
               pn = p
               pn = np.reshape(np.matrix(pn),(1,32))
               p = np.reshape(np.matrix(p),(32,1))
               pm = pn * p
               exppn = np.append(exppn, pn)
               exp = np.append(exp, pm)
      else:
         for j in range(sh):
            if j == 0:
               d = postds[j]
               p = w.normalize(d)
               p = np.reshape(np.matrix(p),(32,1))
               j1 = 0
               j2 = 32
               pm = np.matrix(exppn[j1:j2]) * p
               exp = np.append(exp, pm)
               count += 1
            else:
               d = postds[j]
               p = w.normalize(d)
               p = np.reshape(np.matrix(p),(32,1))
               j1 = 32 * j
               j2 = 32 * (j + 1)
               pm = np.matrix(exppn[j1:j2]) * p
               exp = np.append(exp, pm)
               count += 1







   ##########
   # Find Average of K-cluster[x] Weights
   ##########


   thenumber = lenpat
   thenumberf = float(thenumber)

   patpla = exp
   lenpat = len(patpla)


   howlong = lenpat / thenumber

   total = []
   logtotal = []

   for i in range(thenumber):
      subtotal = []
      logsubtotal = []
      for j in range(howlong):
         if i == 0:
            value = patpla[i + (thenumber * j)]
            total = np.append(total, value)
            logvalue = patpla[i + (thenumber * j)]
            logvalue = math.log10(logvalue)
            logtotal = np.append(logtotal, logvalue)
         else:
            value = patpla[i + (thenumber * j)]
            subtotal = np.append(subtotal, value) 
            logvalue = patpla[i + (thenumber * j)]
            logvalue = math.log10(logvalue)
            logsubtotal = np.append(logsubtotal, logvalue)
        
      if i > 0:
         total = total + subtotal
      if i > 0:
         logtotal = logtotal + logsubtotal


   total = total / thenumberf
   logtotal = logtotal / thenumberf


   global total1
   global total2
   global total3
   global total4
   global total5
   global total6
   global total7
   global total8
   global total9
   global total10
   global total11
   global total12
   global total13
   global total14
   global total15
   global total16
   global logtotal1
   global logtotal2
   global logtotal3
   global logtotal4
   global logtotal5
   global logtotal6
   global logtotal7
   global logtotal8
   global logtotal9
   global logtotal10
   global logtotal11
   global logtotal12
   global logtotal13
   global logtotal14
   global logtotal15
   global logtotal16

   
   if feature == 0:
      total1=[0]
      total2=[0]
      total3=[0]
      total4=[0]
      total5=[0]
      total6=[0]
      total7=[0]
      total8=[0]
      total9=[0]
      total10=[0]
      total11=[0]
      total12=[0]
      total13=[0]
      total14=[0]
      total15=[0]
      total16=[0]
      logtotal1=[0]
      logtotal2=[0]
      logtotal3=[0]
      logtotal4=[0]
      logtotal5=[0]
      logtotal6=[0]
      logtotal7=[0]
      logtotal8=[0]
      logtotal9=[0]
      logtotal10=[0]
      logtotal11=[0]
      logtotal12=[0]
      logtotal13=[0]
      logtotal14=[0]
      logtotal15=[0]
      logtotal16=[0]



   #global logtotal1
   #global logtotal2
   #global logtotal3
   #global logtotal4
   #global logtotal5
   #global logtotal6
   #global logtotal7
   #global logtotal8
   #global logtotal9
   #global logtotal10
   #global logtotal11
   #global logtotal12
   #global logtotal13
   #global logtotal14
   #global logtotal15
   #global logtotal16

   if feature == 0:
      total1 = total 
      logtotal1 = logtotal
   if feature == 1:
      total2 = total
      logtotal2 = logtotal
   if feature == 2:
      total3 = total
      logtotal3 = logtotal
   if feature == 3:
      total4 = total
      logtotal4 = logtotal
   if feature == 4:
      total5 = total
      logtotal5 = logtotal
   if feature == 5:
      total6 = total
      logtotal6 = logtotal
   if feature == 6:
      total7 = total
      logtotal7 = logtotal
   if feature == 7:
      total8 = total
      logtotal8 = logtotal
   if feature == 8:
      total9 = total
      logtotal9 = logtotal
   if feature == 9:
      total10 = total
      logtotal10 = logtotal
   if feature == 10:
      total11 = total
      logtotal11 = logtotal
   if feature == 11:
      total12 = total
      logtotal12 = logtotal
   if feature == 12:
      total13 = total
      logtotal13 = logtotal
   if feature == 13:
      total14 = total
      logtotal14 = logtotal
   if feature == 14:
      total15 = total
      logtotal15 = logtotal
   if feature == 15:
      total16 = total
      logtotal16 = logtotal

   return
Example #50
def shift_img(img, device, number, side="right", debug=None):
    """this function allows you to shift an image over without changing dimensions

    Inputs:
    img     = image to mask
    number  = number of rows or columns to add
    side   = "top", "bottom", "right", "left" where to add the rows or columns to
    device  = device counter
    debug   = None, print, or plot. Print = save to file, Plot = print to screen.

    Returns:
    device  = device number
    newmask = image mask

    :param img: numpy array
    :param device: int
    :param number: int
    :param side: str
    :param debug: str
    :return newmask: numpy array
    """
    device += 1

    number = number - 1

    if number < 0:
        fatal_error("x and y cannot be negative numbers or non-integers")

    # get the sizes of the images
    if len(np.shape(img)) == 3:
        ix, iy, iz = np.shape(img)
        ori_img = np.copy(img)
    else:
        ix, iy = np.shape(img)
        ori_img = np.dstack((img, img, img))

    if side == "top":
        top = np.zeros((number, iy, 3), dtype=np.uint8)
        adjust = ix - number
        adjusted_img = np.vstack((top, ori_img[0:adjust, 0:]))

    if side == 'bottom':
        bottom = np.zeros((number, iy, 3), dtype=np.uint8)
        adjusted_img = np.vstack((ori_img[number:, 0:], bottom))

    if side == 'right':
        right = np.zeros((ix, number, 3), dtype=np.uint8)
        adjusted_img = np.hstack((ori_img[0:, number:], right))
    if side == 'left':
        left = np.zeros((ix, number, 3), dtype=np.uint8)
        adjust = iy - number
        adjusted_img = np.hstack((left, ori_img[0:, 0:adjust]))


    if len(np.shape(img)) == 2:
        adjusted_img, channel2, channel3 = np.dsplit(adjusted_img, 3)
    if debug == 'print':
        print_image(adjusted_img, (str(device) + "_shifted_img.png"))
    elif debug == 'plot':
        if len(np.shape(adjusted_img)) == 3:
            plot_image(adjusted_img)
        else:
            plot_image(adjusted_img, cmap='gray')

    return device, adjusted_img
Example #51
                    gaze_pix_positions):
                frame_index_alt = frame_index_alt + 1
                gaze_frame_n = gaze_pix_positions[frame_index_alt][0]

            if gaze_frame_n == frame_index + 1:

                gaze_frame_n = gaze_pix_positions[frame_index_alt][0]

                gaze_frame_n = gaze_pix_positions[frame_index_alt][0]

                gaze_positions_x = gaze_pix_positions[frame_index_alt][1]
                gaze_positions_y = gaze_pix_positions[frame_index_alt][2]
                gaze_positions_time = gaze_pix_positions[frame_index_alt][3]

                frame_xyz = cv2.cvtColor(frame, cv2.COLOR_RGB2XYZ)
                frame_x, frame_y, frame_z = np.dsplit(frame_xyz, 3)
                lum = np.average(frame_y) / 255
                frame_blurr = frame_y
                #frame_blurr=cv2.GaussianBlur(frame_y,(15,15),cv2.BORDER_DEFAULT)
                frame_blurr = cv2.blur(frame_y, (11, 11), cv2.BORDER_DEFAULT)
                frame_blurr = cv2.blur(frame_blurr, (21, 21),
                                       cv2.BORDER_DEFAULT)
                frame_blurr = cv2.blur(frame_blurr, (41, 41),
                                       cv2.BORDER_DEFAULT)
                #frame_blurr=cv2.blur(frame_blurr,(81,81),cv2.BORDER_DEFAULT)

                lumBlurr = np.average(frame_blurr) / 255

                lumCoeff = lum / lumBlurr

                for i in range(0, len(gaze_positions_time)):
Example #52
0
def onp_dsplit(input_array):
    a = onp.dsplit(input_array, indices_or_sections=3)
    b = onp.dsplit(input_array, indices_or_sections=(-10, -4, 5, 10))
    c = onp.dsplit(input_array, indices_or_sections=[0, 2])
    return a, b, c
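A quick, self-contained check of the helper above, assuming onp is the usual numpy alias (the fragment does not show its imports):

import numpy as onp  # assumed alias; not shown in the original fragment

x = onp.arange(24).reshape(2, 2, 6)
a, b, c = onp_dsplit(x)
print([s.shape for s in a])  # three equal slabs along the depth axis: (2, 2, 2) each
print([s.shape for s in c])  # indices [0, 2] give shapes (2, 2, 0), (2, 2, 2) and (2, 2, 4)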
Example #53
0
    def return_subset_for_nn(self, sigtype, weights, textures):
        combos = []
        for weight in list(weights):
            for texture in list(textures):
                combos.append((weight, texture))

        self.filtered_trial_count = 0
        if sigtype == 'eeg':
            first = True
            for i, array in enumerate(
                    np.dsplit(self.eeg_tuple[0], self.trial_count)):
                for combo in combos:
                    if (self.eeg_tuple[1][i, combo[0] - 1] !=
                            0) and (self.eeg_tuple[2][i, combo[1] - 1] != 0):
                        self.filtered_trial_count += 1
                        if first:
                            self.filtered_eeg = array
                            self.filtered_weights = self.eeg_tuple[1][i]
                            self.filtered_textures = self.eeg_tuple[2][i]
                            first = False
                        else:
                            self.filtered_eeg = np.dstack(
                                (self.filtered_eeg, array))
                            self.filtered_weights = np.vstack(
                                (self.filtered_weights, self.eeg_tuple[1][i]))
                            self.filtered_textures = np.vstack(
                                (self.filtered_textures, self.eeg_tuple[2][i]))
            eegdata = [
                self.filtered_eeg, self.filtered_weights,
                self.filtered_textures
            ]
            return tuple(self.reshape_for_nn(eegdata, 'eeg'))
        elif sigtype == 'emg':
            first = True
            for i, array in enumerate(
                    np.dsplit(self.emg_tuple[0], self.trial_count)):
                for combo in combos:
                    if (self.emg_tuple[1][i, combo[0] - 1] !=
                            0) and (self.emg_tuple[2][i, combo[1] - 1] != 0):
                        self.filtered_trial_count += 1
                        if first:
                            self.filtered_emg = array
                            self.filtered_weights = self.emg_tuple[1][i]
                            self.filtered_textures = self.emg_tuple[2][i]
                            first = False
                        else:
                            self.filtered_emg = np.dstack(
                                (self.filtered_emg, array))
                            self.filtered_weights = np.vstack(
                                (self.filtered_weights, self.emg_tuple[1][i]))
                            self.filtered_textures = np.vstack(
                                (self.filtered_textures, self.emg_tuple[2][i]))
            emgdata = [
                self.filtered_emg, self.filtered_weights,
                self.filtered_textures
            ]
            return tuple(self.reshape_for_nn(emgdata, 'emg'))
        elif sigtype == 'both':
            first = True
            for i, array in enumerate(
                    np.dsplit(self.eeg_tuple[0], self.trial_count)):
                for combo in combos:
                    if (self.eeg_tuple[1][i, combo[0] - 1] !=
                            0) and (self.eeg_tuple[2][i, combo[1] - 1] != 0):
                        self.filtered_trial_count += 1
                        if first:
                            self.filtered_eeg = array
                            self.filtered_weights = self.eeg_tuple[1][i]
                            self.filtered_textures = self.eeg_tuple[2][i]
                            first = False
                        else:
                            self.filtered_eeg = np.dstack(
                                (self.filtered_eeg, array))
                            self.filtered_weights = np.vstack(
                                (self.filtered_weights, self.eeg_tuple[1][i]))
                            self.filtered_textures = np.vstack(
                                (self.filtered_textures, self.eeg_tuple[2][i]))
            first = True
            for i, array in enumerate(
                    np.dsplit(self.emg_tuple[0], self.trial_count)):
                for combo in combos:
                    if (self.emg_tuple[1][i, combo[0] - 1] !=
                            0) and (self.emg_tuple[2][i, combo[1] - 1] != 0):
                        if first:
                            self.filtered_emg = array
                            first = False
                        else:
                            self.filtered_emg = np.dstack(
                                (self.filtered_emg, array))
            eegdata = [
                self.filtered_eeg, self.filtered_weights,
                self.filtered_textures
            ]
            emgdata = [
                self.filtered_emg, self.filtered_weights,
                self.filtered_textures
            ]
            return tuple(self.reshape_for_nn(eegdata, 'eeg')), tuple(
                self.reshape_for_nn(emgdata, 'emg'))
Example #54
0
    filename_list = filenames

chunk_num = len(filename_list) // chunk_size

print("Creating " + str(chunk_num) + " chunks for the dataset")

with h5py.File(dset_name, 'w') as hf:
    color_dset = hf.create_dataset('ycbcr_imgs', (10, 224, 224, 3), maxshape=(None, 224, 224, 3))

    for i in range(len(filename_list)):
        filepath = os.path.join(root_dir + source_dir + '/', filename_list[i])
        image = ndimage.imread(filepath, mode="RGB")
        image = Image.fromarray(image)
        image = ImageOps.fit(image, size, Image.ANTIALIAS)
        image = np.reshape(np.array(image), [224, 224, 3])
        channels = np.dsplit(image, 3)
        image = np.dstack(_ycc(channels[0], channels[1], channels[2]))
        image = image / 255
        color_images.append(image)
        # First condition allows for partial chunks if specified as a param.
        if ((partial_chunks) and ((i + 1) == len(filename_list))) or ((i + 1) % chunk_size == 0):
            if (i + 1) == len(filename_list):  # If this is a partial chunk, set the offset to that length
                backwards_offset = len(filename_list) % chunk_size
            else:
                backwards_offset = chunk_size
            print("Chunk " + str((i + 1) // chunk_size) + " of " + str(chunk_num) + ".")
            # Careful for off-by-one errors here.
            color_dset.resize(i + 1, axis=0)
            color_images = np.array(color_images)
            color_dset[(i + 1) - backwards_offset:, :, :, :] = color_images
            color_images = []
Example #55
0
print(f, g, h, sep='\n')

i = np.hstack((a, b, c))
print('i')
print(i)
j = np.concatenate((a, b, c), axis=1)
print('j')
print(j)
k, l, m = np.hsplit(j, 3)
print('k, l, m')
print(k, l, m, sep='\n')

n = np.dstack((a, b))
print('n')
print(n)
o, p = np.dsplit(n, 2)
print('o, p')
print(o, p, sep='\n')
print('o.T')
print(o.T)
print(o.T[0].T, p.T[0].T, sep='\n')

q = np.arange(1, 4)
r = np.arange(4, 7)
s = np.arange(7, 10)
print('q, r, s')
print(q, r, s, sep='\n')
t = np.row_stack((q, r, s))
print('t')
print(t)
u = np.column_stack((q, r, s))
Example #56
0
# Horizontal direction
c = np.hstack((a, b))
print(c)

d, e, f = np.hsplit(c, 3)
print(d)
print(e)
print(f)

# Vertical direction
c = np.vstack((a, b))
print(c)
d, e = np.vsplit(c, 2)
print(d)
print(e)

# Depth direction
print('*' * 30)
c = np.dstack((a, b))
print(c)
d, e = np.dsplit(c, 2)
print(d)
print(e)

# Combining one-dimensional arrays
a = np.arange(1, 9)
b = np.arange(9, 17)
print(a)
print(b)
print(np.row_stack((a, b)))  # forms 2 rows
print(np.column_stack((a, b)))  # forms 2 columns
Example #57
0
def YUVSplit(image):
    [Y, U, V] = np.dsplit(image, image.shape[-1])
    Y = Y.flatten().reshape(image.shape[0], image.shape[1])
    U = U.flatten().reshape(image.shape[0], image.shape[1])
    V = V.flatten().reshape(image.shape[0], image.shape[1])
    return Y, U, V
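The flatten/reshape above just removes the trailing singleton axis produced by dsplit; an equivalent formulation (a sketch using standard NumPy, not part of the original code) is:

import numpy as np

def yuv_split_squeeze(image):
    # dsplit yields (H, W, 1) slabs; squeeze drops the singleton channel axis
    Y, U, V = (c.squeeze(axis=-1) for c in np.dsplit(image, image.shape[-1]))
    return Y, U, V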
Example #58
0
    def compute_luminance(self, img):
        # Weighted sum of the channels using the Rec. 709 luma coefficients (assuming RGB channel order)
        channels = np.dsplit(img, 3)
        lum = np.array(0.2126 * channels[0] + 0.7152 * channels[1] + 0.0722 * channels[2])
        return lum
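As a quick sanity check on the coefficients above (the Rec. 709 / sRGB luma weights), a pure-white pixel should map to a luminance of 1 — a small self-contained sketch:

import numpy as np

white = np.ones((1, 1, 3))  # a single pure-white RGB pixel
channels = np.dsplit(white, 3)
lum = 0.2126 * channels[0] + 0.7152 * channels[1] + 0.0722 * channels[2]
print(lum.item())  # ~1.0, since the three weights sum to one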
Example #59
0
import numpy as np

x=np.array([[[1,2],[3,4]],
            [[5,6],[7,8]]])

print('array_split axis=0:\n', np.array_split(x,2,axis=0))
print('array_split axis=1:\n', np.array_split(x,2,axis=1))


x1=np.array([1,2,3,4,5,6,7])
print('split axis=0:\n', np.split(x1,[3,6],axis=0))


print('horizontal split=\n', np.hsplit(x,2))
print('vertical split=\n', np.vsplit(x,2))
print('depth split=\n', np.dsplit(x,2))
Example #60
0
imageHSV = cv2.cvtColor(imageRGBCrop,cv2.COLOR_RGB2HSV)
def normalizeNDMatrix (input_matrix):
    channelA,channelB,channelC = numpy.dsplit(input_matrix,3)
    flatA = numpy.ndarray.flatten(channelA).astype(numpy.double)
    flatB = numpy.ndarray.flatten(channelB).astype(numpy.double)
    flatC = numpy.ndarray.flatten(channelC).astype(numpy.double)
    channelA,channelB,channelC = channelA/max(flatA),channelB/max(flatB),channelC/max(flatC)
    channelB = channelB - 0.0747
    return channelA,channelB,channelC

channelH,channelS,channelV = normalizeNDMatrix(imageHSV)

# Cropped RGB to Lab (CIE)
imageGaussFilt = cv2.GaussianBlur(imageRGBCrop,(3,3),3)
imageLAB = cv2.cvtColor(imageGaussFilt,cv2.COLOR_RGB2Lab)
channelL,channelA,channelB = numpy.dsplit(imageLAB,3)
meanLDim = numpy.squeeze(numpy.asarray(channelL)).mean()
meanADim = numpy.squeeze(numpy.asarray(channelA)).mean()
meanBDim = numpy.squeeze(numpy.asarray(channelB)).mean()
# squared Euclidean distance of each pixel's Lab vector from the image-mean Lab vector
imageLABContrast = (channelL - meanLDim)**2 + (channelA - meanADim)**2 + (channelB - meanBDim)**2
emptyMatrix = numpy.zeros(imageLABContrast.shape)
imageLABGray = cv2.normalize(imageLABContrast,emptyMatrix,1.0,0.0,cv2.NORM_MINMAX)


## Saliency Map Creation
# Resize LAB Image
cropsizeX,cropsizeY,cropsizeZ = imageLABGray.shape
downscaleTuple = (int(numpy.ceil(cropsizeX/6.0)),int(numpy.ceil(cropsizeY/6.0)))
imageLABResize = cv2.resize(imageLABGray,downscaleTuple)

# Spectral Residual