Example #1
import numpy as np
from astropy import table


def test_uint_indexing():
    """
    Test that accessing a row with an unsigned integer
    works as with a signed integer.  Similarly tests
    that printing such a row works.

    This is non-trivial: adding a signed and unsigned
    integer in numpy results in a float, which is an
    invalid slice index.

    Regression test for gh-7464.
    """
    t = table.Table([[1., 2., 3.]], names='a')
    assert t['a'][1] == 2.
    assert t['a'][np.int_(1)] == 2.  # np.int_ stands in for the removed np.int alias
    assert t['a'][np.uint(1)] == 2.
    assert t[np.uint(1)]['a'] == 2.

    trepr = ['<Row index=1>',
             '   a   ',
             'float64',
             '-------',
             '    2.0']

    assert repr(t[1]).splitlines() == trepr
    assert repr(t[np.int_(1)]).splitlines() == trepr
    assert repr(t[np.uint(1)]).splitlines() == trepr
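As a standalone illustration of the promotion pitfall the docstring describes (a sketch; the exact dtypes assume a 64-bit platform and NumPy's default promotion rules):

import numpy as np

idx = np.uint64(1) + np.int64(1)
print(idx, idx.dtype)  # 2.0 float64 -- no integer type holds both ranges, so the sum is no longer a valid index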
Example #2
 def get_panorama_col_from_azimuth(self, azimuth):
     '''
     @param azimuth: The target azimuth angle (in radians) for which the corresponding col in the panoramic image is to be found.
     @retval col: The valid col number (first col is index 0 and last is width-1) in the panorama where the azimuth maps to.
     '''
     azimuth_filtered = np.mod(azimuth, 2.0 * np.pi)  # Filter input azimuth so values are only positive angles between 0 and 2PI
     arc_length = self.cyl_radius * azimuth_filtered
     col = np.uint(self.cols - 1 - np.uint(arc_length / self.pixel_size))
     return col
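The np.mod wrap-around used above is easy to check in isolation; a minimal sketch:

import numpy as np

print(np.mod(-np.pi / 2, 2 * np.pi))  # 4.712...: -90 degrees wraps to +270 degrees
print(np.mod(5 * np.pi, 2 * np.pi))   # 3.141...: whole turns are removed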
Example #3
    def __init__(self,timeSeries=None,
                 lenSeries=2**18,
                 numChannels=1,
                 fMin=400,fMax=800,
                 sampTime=None,
                 noiseRMS=0.1):
        """ Initializes the AmplitudeTimeSeries instance. 
        If a array is not passed, then a random whitenoise dataset is generated.
        Inputs: 
        Len -- Number of time data points (usually a power of 2) 2^38 gives about 65 seconds 
        of 400 MHz sampled data
        The time binning is decided by the bandwidth
        fMin -- lowest frequency (MHz)
        fMax -- highest frequency (MHz)
        noiseRMS -- RMS value of noise (TBD)
        noiseAlpha -- spectral slope (default is white noise) (TBD)
        ONLY GENERATES WHITE NOISE RIGHT NOW!
        """
        self.shape = (np.uint(numChannels),np.uint(lenSeries))
        self.fMax = fMax
        self.fMin = fMin        
        
        if sampTime is None:
            self.sampTime = np.uint(numChannels)*1E-6/(fMax-fMin)
        else:
            self.sampTime = sampTime

        if timeSeries is None:
            # then use the rest of the data to generate a random timeseries
            if VERBOSE:
                print "AmplitudeTimeSeries __init__ did not get new data, generating white noise data"

            self.timeSeries = np.complex64(noiseRMS*(np.float16(random.standard_normal(self.shape))
                                                     +np.float16(random.standard_normal(self.shape))*1j)/np.sqrt(2))
            
        else:
            if VERBOSE:
                print "AmplitudeTimeSeries __init__ got new data, making sure it is reasonable."

            if len(timeSeries.shape) == 1:
                self.shape = (1,timeSeries.shape[0])
                
            else:
                self.shape = timeSeries.shape

            self.timeSeries = np.reshape(np.complex64(timeSeries),self.shape)
            
            self.fMin = fMin
            self.fMax = fMax

            if sampTime is None:
                self.sampTime = numChannels*1E-6/(fMax-fMin)
            else:
                self.sampTime = sampTime

        return None
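The white-noise branch divides by sqrt(2) so the complex RMS lands on noiseRMS; a quick check of that normalization (NumPy only, arbitrary shape):

import numpy as np
from numpy import random

shape = (1, 2**16)
noise = np.complex64(0.1 * (random.standard_normal(shape) +
                            random.standard_normal(shape) * 1j) / np.sqrt(2))
print(np.sqrt(np.mean(np.abs(noise) ** 2)))  # ~0.1: real and imaginary parts each carry half the power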
Example #4
def TipDetector(I):
    
    I.flags.writeable = True
    
    # Convert RGB to YUV
    Y=0.3*I[:,:,2]+0.6*I[:,:,1]+0.1*I[:,:,0]
    V=0.4375*I[:,:,2]-0.375*I[:,:,1]-0.0625*I[:,:,0]
    U=-0.15*I[:,:,2]-0.3*I[:,:,1]+0.45*I[:,:,0]

    # Find pink
    M=np.ones((np.shape(I)[0], np.shape(I)[1]), np.uint8)*255
    for i in range(0,np.shape(I)[0]):
        for j in range(0,np.shape(I)[1]):
            if V[i,j]>15 and U[i,j]>-7:
                M[i,j]=0
    kernel = np.ones((5,5),np.uint8)   
    M = cv2.morphologyEx(M, cv2.MORPH_OPEN, kernel)
    M=cv2.GaussianBlur(M,(7,7),8)
    
    # find Harris corners in pink mask
    dst = cv2.cornerHarris(M,5,3,0.04)
    dst = cv2.dilate(dst,None)
    ret, dst = cv2.threshold(dst,0.7*dst.max(),255,0)
    dst = np.uint8(dst)
    E = np.where(dst > 0.01*dst.max())
    
    # find Harris corners in image
    gray1 = cv2.cvtColor(I,cv2.COLOR_BGR2GRAY)
    gray1 = np.float32(gray1)
    dst1 = cv2.cornerHarris(gray1,3,3,0.04)
    dst1 = cv2.dilate(dst1,None)
    ret1, dst1 = cv2.threshold(dst1,0.01*dst1.max(),255,0)
    dst1 = np.uint8(dst1)
    E1 = np.where(dst1 > 0.01*dst1.max())

    # no tip identified  
    if E[0].size == 0 or E1[0].size == 0:  # np.where returns a tuple of index arrays; a bare tuple is always truthy
        return [0,0]
    
    # Rearrange the coordinates in more readable format
    ind1 = np.lexsort((E1[1],E1[0]))
    C1=[(E1[1][i],E1[0][i]) for i in ind1]
    ind = np.lexsort((E[1],E[0]))
    C=[(E[1][i],E[0][i]) for i in ind]
    
    # Identify the tip
    D=[]
    for i in range(1,np.shape(C1)[0]):
        for j in range(1,np.shape(C)[0]):
            if abs(C1[i][0]-C[j][0])<5 and abs(C1[i][1]-C[j][1])<5:
                D.append([int(np.uint(C1[i][0]*2)), int(np.uint(C1[i][1]*2))])
    if not D:
        return [0,0]
    else:
        return count(D)
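The np.lexsort calls above order corner coordinates row-major: the last key passed is the primary sort key. A small sketch of that behaviour:

import numpy as np

rows = np.array([2, 0, 2, 1])
cols = np.array([3, 1, 0, 5])
order = np.lexsort((cols, rows))  # sort by rows first, then by cols within equal rows
print([(rows[i], cols[i]) for i in order])  # [(0, 1), (1, 5), (2, 0), (2, 3)]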
Example #5
def fun_select_image_area(image_data):
    """Ideally select the area with the information in it.

        Basically I'm defining a grid and taking only the center as the important area.
    """

    ss = np.shape(image_data)
    h = np.uint(np.linspace(0, ss[0], 6))
    v = np.uint(np.linspace(0, ss[1], 6))
    image_data_area = image_data[h[2] : h[3], v[2] : v[3], :]
    # image_data_area = image_data[h[1]:h[4],v[1]:v[4],:]

    return image_data_area
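Passing a np.linspace grid through np.uint, as above, truncates the float edges into usable slice bounds; in isolation:

import numpy as np

edges = np.uint(np.linspace(0, 100, 6))
print(edges)               # [0 20 40 60 80 100] as unsigned integers
print(edges[2], edges[3])  # 40 60: the central fifth of a 100-pixel axis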
Example #6
def max_32_val():
	x = 0
	numpy.uint(x)  # note: the result is discarded; numpy.uint does not modify x in place
	x = 0xFFFFFFFFFF  # 10 hex digits = 5 bytes; Python ints are arbitrary precision
	#x = 0x800000000; # Mask for 1000 0000 0000.... 0000
	#x = x >> 32; Interestingly... it uses a logical right shift, not arithmetic.
	print(x)
	x = x // 8
	print("Number of bytes: ", x)
	x = x // 1024
	print("Number of Kilobytes: ", x)
	x = x // 1024
	print("Number of Megabytes: ", x)
	x = x // 1024
	print("Number of Gigabytes: ", x)
Example #7
def downsample(image, scale):
    """Downsample an image down to a smaller image by a factor of `scale`, by
    averaging the bins."""
    result_shape = np.uint(np.array(image.shape) // scale)
    result = np.zeros(result_shape, dtype=image.dtype)
    num_avg = scale**2
    if len(result_shape) == 2:
        # 2d downsample
        # XXX: I know this is topography, so scale down the z-axis, too.
        ylim, xlim = result_shape
        for y in range(ylim):
            for x in range(xlim):
                xmin, xmax = x * scale, (x+1) * scale
                ymin, ymax = y * scale, (y+1) * scale
                result[y,x] = np.mean(image[ymin:ymax, xmin:xmax] / scale)
    elif len(result_shape) == 3:
        # 3d downsample
        # XXX: I know this is price data, so sum it instead of averaging.
        zlim, ylim, xlim = result_shape
        for z in range(zlim):
            for y in range(ylim):
                for x in range(xlim):
                    zmin, zmax = z * scale, (z+1) * scale
                    xmin, xmax = x * scale, (x+1) * scale
                    ymin, ymax = y * scale, (y+1) * scale
                    result[z,y,x] = np.sum(image[zmin:zmax, ymin:ymax, xmin:xmax])
    return result
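A usage sketch for downsample on synthetic data (assuming the function above is in scope):

import numpy as np

image = np.arange(16, dtype=np.float64).reshape(4, 4)
small = downsample(image, 2)
print(small.shape)  # (2, 2)
print(small)        # each bin is mean(block) / 2, per the topography scaling above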
Example #8
 def __init__(self, opt):
     # option (dictionary) contains
     # mandatory:
     # season, batter/pitcher
     self.season = []
     if len(opt['season']) == 1:
         self.season.append(str(opt['season'][0]))
     else:            
         self.season = np.linspace(opt['season'][0],
                                   opt['season'][1], 
                                   opt['season'][1]-opt['season'][0]+1)
         for i in range(len(self.season)):
             self.season[i] = np.uint(self.season[i])
     # batter or pitcher
     self.type = opt['type']
     # options:
     # position, file path, etc (tbd)
     if 'position' in opt:
         self.position = opt['position']
     else:
         self.position = 'NULL'
     if 'file' in opt:
         self.fp = opt['file']
     else:
         self.fp = []
     # initialize DB
     # DB structure
     # [season] -> [each files] -> [each line]
     self.db = []
Example #9
def parse_text(file_name):
    """Parse data from Ohio State University text mocap files (http://accad.osu.edu/research/mocap/mocap_data.htm)."""

    # Read the header
    fid = open(file_name, 'r')
    point_names = np.array(fid.readline().split())[2:-1:3]
    fid.close()
    for i in range(len(point_names)):
        point_names[i] = point_names[i][0:-2]

    # Read the matrix data
    S = np.loadtxt(file_name, skiprows=1)
    field = np.uint(S[:, 0])
    times = S[:, 1]
    S = S[:, 2:]

    # Set the -9999.99 markers to be not present
    S[S==-9999.99] = np.nan

    # Store x, y and z in different arrays
    points = []
    points.append(S[:, 0:-1:3])
    points.append(S[:, 1:-1:3])
    points.append(S[:, 2:-1:3])

    return points, point_names, times
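The x/y/z de-interleaving above relies on strided column slices; in miniature (the snippet's 0:-1:3 variant additionally drops the trailing column):

import numpy as np

S = np.arange(18).reshape(2, 9)
print(S[:, 0::3])  # columns 0, 3, 6: the x coordinates
print(S[:, 1::3])  # columns 1, 4, 7: the y coordinates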
Example #10
File: Learn.py Project: e-271/ocr
def _load_MNIST(datafile, labelfile):
    #Get training data
    df = open(datafile, 'rb')

    magic = int(binascii.hexlify(df.read(4)), 16)
    assert magic == 2051
    num_examples = int(binascii.hexlify(df.read(4)), 16)
    i = int(binascii.hexlify(df.read(4)), 16)
    j = int(binascii.hexlify(df.read(4)), 16)

    #I only have to work with the feature matrix in terms of its rows,
    #so I store it as a list of <train.num_examples> rows.
    one = np.array([np.uint(255)])
    features = []
    for example in range(0, num_examples):
        #Create a numpy uint8 array of pixels. The first attribute is a constant (255 here) standing in for the y-intercept term in [theta].
        features.append(np.concatenate((one, np.fromfile(df, dtype='u1', count=i*j))))


    lf = open(labelfile, 'rb')
    assert (int(binascii.hexlify(lf.read(4)), 16)) == 2049                  #check magic
    images = (int(binascii.hexlify(lf.read(4)), 16))
    labels = np.fromfile(lf, dtype='u1', count=images)

    data = Data(features=features, labels=labels, theta=np.zeros((10, 785)),
                num_features=i*j, num_labels=10, num_examples=num_examples,
                alpha=0.01, epsilon = 0.01)
    return data
Example #11
def deterministic_shuffle(list_, seed=1):
    r"""
    Args:
        list_ (list):
        seed (int):

    Returns:
        list: list_

    CommandLine:
        python -m utool.util_numpy --test-deterministic_shuffle

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_numpy import *  # NOQA
        >>> list_ = [1,2,3,4,5,6]
        >>> seed = 1
        >>> list_ = deterministic_shuffle(list_, seed)
        >>> result = str(list_)
        >>> print(result)
        [4, 6, 1, 3, 2, 5]
    """
    rand_seed = np.uint32(np.random.rand() * np.uint(0 - 2) / 2)
    if not isinstance(list_, (np.ndarray, list)):
        list_ = list(list_)
    seed_ = len(list_) + seed
    np.random.seed(seed_)
    np.random.shuffle(list_)
    np.random.seed(rand_seed)  # reseed
    return list_
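The np.uint(0 - 2) expression above exploits unsigned wrap-around; note that NumPy 2.x rejects out-of-range Python ints, while casting from a signed NumPy scalar still wraps:

import numpy as np

print(np.uint64(np.int64(-2)))  # 18446744073709551614: two's-complement wrap-around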
Example #12
def sanitize_refreq(origin, dest):

    dest.create_dataset(name="data", data=origin["Data"].value.transpose((2,0,1,3)))
    dest["data"].attrs.create('__complex__', "1")

    dest.create_group(name="indices")
    exec("indL = %s"%origin["IndicesL"].value)
    exec("indR = %s"%origin["IndicesR"].value)
    indL = [ str(i) for i in indL ]
    indR = [ str(i) for i in indR ]
    dest["indices"].create_dataset(name="left", data=indL)
    dest["indices"].create_dataset(name="right", data=indR)

    dest.create_group(name="singularity")
    dest["singularity"].create_dataset(name="data", data=origin["Tail"]["array"].value.transpose((2,0,1,3)))
    dest["singularity"]["data"].attrs.create('__complex__', "1")
    dest["singularity"].create_dataset(name="omin", data=origin["Tail"]["OrderMinMIN"].value)
    mask = numpy.zeros( dest["singularity"]["data"].shape[0:2], numpy.int_ )
    mask.fill(origin["Tail"]["OrderMax"].value)
    dest["singularity"].create_dataset(name="mask", data=mask)

    dest.create_group(name="mesh")
    size = numpy.uint(len(origin["Mesh"]["array"].value))
    min_w = origin["Mesh"]["array"].value[0]
    max_w = origin["Mesh"]["array"].value[-1]
    dest["mesh"].create_dataset(name="kind", data=1)
    dest["mesh"].create_dataset(name="min", data=min_w)
    dest["mesh"].create_dataset(name="max", data=max_w)
    dest["mesh"].create_dataset(name="size", data=size)

    return ['Data', 'IndicesL', 'IndicesR', 'Mesh', 'Name', 'Note', 'Tail']
Example #13
def sanitize_imfreq(origin, dest):

    dest.create_dataset(name="data", data=origin["Data"].value.transpose((2,0,1,3)))
    dest["data"].attrs.create('__complex__', "1")

    dest.create_group(name="indices")
    exec("indL = %s"%origin["IndicesL"].value)
    exec("indR = %s"%origin["IndicesR"].value)
    indL = [ str(i) for i in indL ]
    indR = [ str(i) for i in indR ]
    dest["indices"].create_dataset(name="left", data=indL)
    dest["indices"].create_dataset(name="right", data=indR)

    dest.create_group(name="singularity")
    dest["singularity"].create_dataset(name="data", data=origin["Tail"]["array"].value.transpose((2,0,1,3)))
    dest["singularity"]["data"].attrs.create('__complex__', "1")
    dest["singularity"].create_dataset(name="omin", data=origin["Tail"]["OrderMinMIN"].value)
    mask = numpy.zeros( dest["singularity"]["data"].shape[0:2], numpy.int_ )
    mask.fill(origin["Tail"]["OrderMax"].value)
    dest["singularity"].create_dataset(name="mask", data=mask)

    dest.create_group(name="mesh")
    beta = origin["Mesh"]["Beta"].value
    pi = numpy.arccos(-1)
    size = numpy.uint(len(origin["Mesh"]["array"].value))
    dest["mesh"].create_dataset(name="kind", data=2)
    dest["mesh"].create_dataset(name="min", data=pi/beta)
    dest["mesh"].create_dataset(name="max", data=(2*size+1)*pi/beta)
    dest["mesh"].create_dataset(name="size", data=size)
    dest["mesh"].create_group(name="domain")
    dest["mesh"]["domain"].create_dataset(name="beta", data=beta)
    dest["mesh"]["domain"].create_dataset(name="statistic", data={"Fermion":"F", "Boson":"B"}[origin["Mesh"]["Statistic"].value] )

    return ['Data', 'IndicesL', 'IndicesR', 'Mesh', 'Name', 'Note', 'Tail']
Example #14
    def __get_excit_wfm(filepath):
        """
        Returns the excitation BE waveform present in the more parms.mat file
        
        Parameters
        ------------
        filepath : String / unicode
            Absolute filepath of the .mat parameter file
        
        Returns
        -----------
        ex_wfm : 1D numpy float array
            Band Excitation waveform

        """
        if not path.exists(filepath):
            warn('BEPSndfTranslator - NO more_parms.mat file found')
            return np.zeros(1000, dtype=np.float32)  # note: a lone placeholder array, unlike the tuple returned below

        if 'more_parms' in filepath:
            matread = loadmat(filepath, variable_names=['FFT_BE_wave'])
            fft_full = np.complex64(np.squeeze(matread['FFT_BE_wave']))
            bin_inds = None
            fft_full_rev = None
        else:
            matread = loadmat(filepath, variable_names=['FFT_BE_wave', 'FFT_BE_rev_wave', 'BE_bin_ind'])
            bin_inds = np.uint(np.squeeze(matread['BE_bin_ind'])) - 1
            fft_full = np.complex64(np.squeeze(matread['FFT_BE_wave']))
            fft_full_rev = np.complex64(np.squeeze(matread['FFT_BE_rev_wave']))

        return fft_full, fft_full_rev, bin_inds
Example #15
 def initialize_cost_map(self):
     ''' Performs all the neccessary initialization
     and creation of the cost map graph.
     '''
     self.params['stripWidth'] = np.uint(np.double(self.costm.shape) \
         / self.params['pixels'])
     #self._add_image_strips()
     self.graph = build_graph(self.costm)
Example #16
    def createHashTable(kd, vd, capacity):
        table_capacity_gpu, _ = mod.get_global('table_capacity')
        cuda.memcpy_htod(table_capacity_gpu, np.uint32([capacity]))  # 32-bit to match sizeof(unsigned int) on the device

        # CUDA_SAFE_CALL(cudaMemcpyToSymbol(table_capacity,
        #           &capacity,
        #           sizeof(unsigned int)));

        table_vals_gpu, table_vals_size = mod.get_global('table_values') # pointer-2-pointer
        values_gpu = gpuarray.zeros((capacity*vd,1), dtype=np.float32)
        # values_gpu = gpuarray.zeros((capacity*vd,1), dtype=np.float32)
        # cuda.memset_d32(values_gpu.gpudata, 0, values_gpu.size)
        cuda.memcpy_dtod(table_vals_gpu, values_gpu.gpudata, table_vals_size)

        # float *values;
        # allocateCudaMemory((void**)&values, capacity*vd*sizeof(float));
        # CUDA_SAFE_CALL(cudaMemset((void *)values, 0, capacity*vd*sizeof(float)));
        # CUDA_SAFE_CALL(cudaMemcpyToSymbol(table_values,
        #                   &values,
        #                   sizeof(float *)));

        table_entries, table_entries_size = mod.get_global('table_entries')
        entries_gpu = gpuarray.empty((capacity*2,1), dtype=np.int32)  # matches the C int entries
        entries_gpu.fill(-1)
        # cuda.memset_d32(entries_gpu.gpudata, 1, entries_gpu.size)
        cuda.memcpy_dtod(table_entries, entries_gpu.gpudata, table_entries_size)

        # int *entries;
        # allocateCudaMemory((void **)&entries, capacity*2*sizeof(int));
        # CUDA_SAFE_CALL(cudaMemset((void *)entries, -1, capacity*2*sizeof(int)));
        # CUDA_SAFE_CALL(cudaMemcpyToSymbol(table_entries,
        #                   &entries,
        #                   sizeof(unsigned int *)));

        ########################################
        # Assuming LINEAR_D_MEMORY not defined #
        ########################################

        #  #ifdef LINEAR_D_MEMORY
        # char *ranks;
        # allocateCudaMemory((void**)&ranks, capacity*sizeof(char));
        # CUDA_SAFE_CALL(cudaMemcpyToSymbol(table_rank,
        #                   &ranks,
        #                   sizeof(char *)));
        #
        # signed short *zeros;
        # allocateCudaMemory((void**)&zeros, capacity*sizeof(signed short));
        # CUDA_SAFE_CALL(cudaMemcpyToSymbol(table_zeros,
        #                   &zeros,
        #                   sizeof(char *)));
        #
        # #else

        table_keys_gpu, table_keys_size = mod.get_global('table_keys')
        keys_gpu = gpuarray.zeros((capacity*kd,1), dtype=np.short)
        # keys_gpu = gpuarray.empty((capacity*kd,1), dtype=np.short)
        # cuda.memset_d32(keys_gpu.gpudata, 0, keys_gpu.size)
        cuda.memcpy_dtod(table_keys_gpu, keys_gpu.gpudata, table_keys_size)
Example #17
 def values_labels(self):
     labels = self.stata_object.values_labels
     new_labels_dict = dict()
     for var in labels:
         var_label = list(labels[var].items())
         label_tuple = [(np.uint(item[0]).astype(int), item[1])
                        for item in var_label]
         new_labels_dict[var] = dict(label_tuple)
     return new_labels_dict
Example #18
def getInfoFromPath(fpath):
    """
    Split a .npy file path into the name of its parent directory and the
    numeric index embedded at the end of the file name.

    :param fpath: path to a .npy file
    :return: (parent directory name, index as np.uint)
    """
    if fpath.lower().endswith(".npy"):
        fpath, fname = os.path.split(fpath)
    else:
        raise ValueError("expected a .npy path")  # fname would otherwise be unbound below
    return fpath.rsplit(os.path.sep, 3)[-1], np.uint(re.split('_|.npy', fname)[-2])
Example #19
 def test_fill_value_uint(self):
     fill_value = np.uint(1234)
     for dtype in self.dtypes:
         data = np.array([0], dtype=dtype)
         dm = DataManager(data)
         dm.fill_value = fill_value
         [expected] = np.array([fill_value], dtype=dtype)
         self.assertEqual(dm.fill_value, expected)
         self.assertEqual(dm.fill_value.dtype, dtype)
Example #20
    def clean(self):
        rtn_array = self.img.copy()
        if self.color_flag:
            red = rtn_array[:,:,0]
            green = rtn_array[:,:,1]
            blue = rtn_array[:,:,2]
            a = numpy.array([red,green,blue])
            rtn_array = numpy.bitwise_and(a.flatten(),numpy.uint(254))
            rtn_array = rtn_array.reshape(3,-1)
            rtn_array = numpy.dstack(tuple(rtn_array))
            rtn_array = rtn_array.reshape(self.img_size[0],self.img_size[1],3)
        else:
            a = numpy.array([rtn_array])
            rtn_array = numpy.bitwise_and(a.flatten(),numpy.uint(254))
            rtn_array = rtn_array.reshape(self.img_size[0],self.img_size[1])

        rtn_img = Payload(rtn_array)
        return rtn_img.img
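The numpy.bitwise_and(..., numpy.uint(254)) calls clear the least-significant bit of every channel; seen in isolation:

import numpy as np

a = np.array([7, 8, 255], dtype=np.uint8)
print(np.bitwise_and(a, np.uint(254)))  # odd values lose their least-significant bit: 7 -> 6, 255 -> 254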
Example #21
def _drawSpot(image, coords, radius, brightness):
	'''
	Draw a single spot at given location.
	
	@param image	Container numpy array.
	@param coords	Location of spot.
	@param radius	Spot size.
	@param	brightness	Spot luminosity.
	'''

	maskX, maskY = _createMask(radius*4)
	
	b = np.exp(-(maskX**2+maskY**2)/(16*radius**2))*brightness
	#b = brightness

	maskX = np.uint(maskX + np.round(coords[0]))
	maskY = np.uint(maskY + np.round(coords[1]))
	
	image[maskX, maskY] += b
Example #22
    def __init__(self, name, nbits):
        """
        The hash name is used in storage to store buckets of
        different hashes without collision.
        """
        self.name = name
        self.nbits = nbits

        # It's more efficient to store uint than bitcodes represented as string.
        self.bits_to_int = np.array([np.uint(2**i) for i in range(self.nbits)])
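With that power-of-two table, a bitcode collapses into a single unsigned integer via a dot product; a minimal sketch with a hypothetical 4-bit code:

import numpy as np

bits = np.array([1, 0, 1, 1], dtype=np.uint64)
weights = np.array([np.uint(2 ** i) for i in range(4)])  # [1 2 4 8]
print(bits @ weights)  # 13: the bitcode (LSB first) packed into one unsigned integer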
Example #23
 def set_file_list(self):
     for i in range(len(self.season)):
         temp_list = []
         file_path = os.path.join('../data/fangraphs', 
                                  str(np.uint(self.season[i])),
                                  self.type)
         contents = os.listdir(file_path)
         for j in range(len(contents)):
             temp_list.append(file_path + '/' + contents[j])
         self.fp.append(temp_list)
Example #24
    def __init__(self,timeSeries=None,
                 lenSeries=2**18,
                 numChannels=1,
                 fMin=400,fMax=800,
                 sampTime=None,
                 noiseRMS=0.1):

        self.shape = (np.uint(lenSeries),np.uint(numChannels))
        self.fMax = fMax
        self.fMin = fMin        
        
        if sampTime is None:
            self.sampTime = np.uint(numChannels)*1E-6/(fMax-fMin)
        else:
            self.sampTime = sampTime

        if timeSeries is None:
            # then use the rest of the data to generate a random timeseries
            if VERBOSE:
                print "IntensityTimeSeries __init__ did not get new data, generating white noise data"

            self.timeSeries = noiseRMS*np.float16(random.standard_normal(self.shape))
                                                     
        else:
            if VERBOSE:
                print "IntensityTimeSeries __init__ got new data, making sure it is reasonable."

            if len(timeSeries.shape) == 1:
                self.shape = (timeSeries.shape[0],1)
                
            else:
                self.shape = timeSeries.shape

            self.timeSeries = np.reshape(np.float16(timeSeries),self.shape)
            
            self.fMin = fMin
            self.fMax = fMax

            if sampTime is None:
                self.sampTime = numChannels*1E-6/(fMax-fMin)
            else:
                self.sampTime = sampTime
Example #25
 def get_panorama_row_from_elevation(self, elevation, use_LUT=True, debug=False):
     '''
     @param elevation: The target elevation angle (in radians) for which the corresponding row in the panoramic image is to be found.
     @retval row: The valid row number (first row is index 0 and last row is height-1) in the panorama where the elevation maps to; NaN when the angle is outside of the allowed elevation range.
     '''
     elevation_validated = np.where(np.logical_and(self.model.lowest_elevation_angle <= elevation, elevation <= self.model.highest_elevation_angle), elevation, np.nan)
     h = np.tan(elevation_validated)
     s = self.cyl_height_max - h
     row = np.where(np.isnan(s), np.nan, np.uint(s / self.pixel_size))  # keeps the valid entries integer-valued while NaNs mark out-of-range angles
     return row
Example #26
		def make_cell_lineage_mask(tval, target_cc_index, Seg):

			labels = np.unique(Seg)
			target_cc_index = set(target_cc_index)
			
			# make the segment border max value 
			max_val = len( self.cell_tracker.list_of_cell_profiles_per_timestamp[tval].list_of_cell_profiles)
			half_val = int(max_val / 2.)
			mask = np.uint(Seg == 0) * max_val

			for label in labels:
				if label>1:
					cp_index = self.cell_tracker.list_of_cell_profiles_per_timestamp[tval].seg_label_to_cp_list_index[label]
					node_name = "t%d_c%d" % (tval, cp_index) 
					cc_index = cc_dict[node_name]
					
					if cc_index in target_cc_index:
						mask += np.uint(Seg==label) * half_val
						print "--- LINK: Label:", label,"| Tracklet:", cc_index,"| CellProfile:", cp_index, "| Node:", node_name, "| t:",tval
			return mask
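The np.uint(Seg == label) expressions turn boolean masks into weighted integer layers; a compact sketch:

import numpy as np

Seg = np.array([[0, 1], [2, 0]])
mask = np.uint(Seg == 0) * 255   # background pixels get 255
mask += np.uint(Seg == 2) * 127  # one tracked label gets 127
print(mask)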
Example #27
 def __init__(self, molno, isono):
     data = molDB()
     molmissing = True
     isomissing = True
     for i in range(1,len(data.dbdata['spec'])):
         #print  i, data.dbdata['spec'][i], molno
         if np.uint(data.dbdata['spec'][i]) == molno:
             molmissing = False
             #print i, data.dbdata['isono'][i], isono
             if np.uint(data.dbdata['isono'][i]) == isono:
                 isomissing = False
                 self.spec = molno
                 self.molname = data.dbdata['molname'][i]
                 self.isono = isono
                 self.isocode = data.dbdata['isocode'][i]
                 self.isoname = data.dbdata['isoname'][i]
                 self.LL = data.dbdata['LL'][i]
                 break
     
     if molmissing or isomissing: sys.exit("Combination of molecule number and isotopologue number not found in MolTran.txt.\n" +
                                           "Tried molno = " + str(molno) + ", isono = " + str(isono))
Example #28
 def get_dolfin_mesh(self):
     mesh = dolfin.Mesh()
     me = dolfin.MeshEditor()
     me.open(mesh,'tetrahedron', 3, 3)
     me.init_vertices(len(self.coordinates))
     me.init_cells(len(self.element_nodes))
     for i, coord in enumerate(self.coordinates):
         me.add_vertex(N.uint(i), *coord)
     for i, el_nodes in enumerate(self.element_nodes):
         me.add_cell(i, *el_nodes)
     me.close()
     return mesh
Example #29
def get_random_hex_id():
    """
    Generates 64 bit hex id for a capn proto msg, then prepends "0x"

    Args:
        None

    Return:
        64 bit hex ID used in Capnproto message formats.

    """
    val = numpy.random.randint(1, numpy.iinfo(numpy.uint64).max, dtype='uint64') | (numpy.uint(1) << numpy.uint(63))
    return "{0:#0{1}x}".format(val, 1)
Example #30
    def update(self,frame,diff_frame):
        """Update the detection status with a new image"""
        self.gray_frame = np.uint8(np.mean(diff_frame,2))
        self.image_moments = moments(self.gray_frame)

        # Update the moving average window
        self.change_window = np.roll(self.change_window,1)
        self.change_window[0] = self.image_moments['m00']

        self.mom_x =  np.uint(self.image_moments['m10']/(self.image_moments['m00']+1))
        self.mom_y =  np.uint(self.image_moments['m01']/(self.image_moments['m00']+1))

        # Detection window output
        #self.gray_frame[self.mom_y:self.mom_y+5,self.mom_x:self.mom_x+5] = 255
        #cv2.imshow('preview',self.gray_frame)

        # Update the moving regression window with the new measurements
        self.regression_window.push(self.mom_x)
        self.time_change_window.push(self.image_moments['m00'])
        
        # Update regressor variables
        self.t_out,self.p_out,self.n_out = self.regression_window.fetch(self.DETECTION_TIME)
        _,self.change_out,_ = self.time_change_window.fetch(self.REF_IM_UPDATE_TIME)
        if np.max(self.change_out) < self.REF_IM_THRESHOLD:
          self.camera.update_reference_frame(frame)
        print(np.max(self.change_out))

        if len(self.t_out) > self.MINIMUM_REGRESSION_SAMPLES:
            self.slope,self.intercept ,_,_,self.std_dev = stats.linregress(self.t_out,self.p_out)

            ############################################################
            ############### Dirty hack! Flipped webcam #################
            ## self.slope = -self.slope 
            ############################################################
            ############################################################

            self.pred_y = self.slope*self.t_out + self.intercept*np.ones(self.t_out.shape)
            self.y_dev = np.sqrt(np.sum((self.p_out-self.pred_y)**2))
Example #31
    def clustering(self, x: np.ndarray, k: int, **kwargs) -> tuple:
        """A fast implementation of k means clustering.
        Credit to Xinlei Chen, Deng Cai
        for more details, please refer to:
        http://www.cad.zju.edu.cn/home/dengcai/Data/Clustering.html
        """
        n, p = x.shape
        # Init centeroids: randomly choose k rows of data, shape = (k, p)
        assert 0 < k < n
        centeroids = self.__init_centeroids(x, k)

        # Start iterations
        iters = 0
        assign = np.uint(np.zeros(n))
        # Get square sum of x
        x_square = self.__calc_square_sum(x)

        while True:
            iters += 1
            # Calculate distances between centeroids and each row:
            # (x - c)^2 = x^2 - 2cx + c^2
            # shape of dists = (n, k)
            ctr_square = self.__calc_square_sum(centeroids)
            dists = -2 * np.matmul(x, centeroids.T)
            dists += ctr_square + x_square
            new_assign = dists.argmin(axis=1)
            # Check whether assign has changed
            if np.array_equal(new_assign, assign):
                break
            assign = new_assign
            # Update centeroids of each cluster
            for i in range(k):
                new_centeroid = x[assign == i].mean(axis=0)
                centeroids[i] = new_centeroid
            # Break if iteration times exceeds bound
            if iters >= self._max_iter:
                break

        # Calculate sum of distance
        dist_sum = self.__calc_dist_sum(x, assign, centeroids)
        return assign, centeroids, dist_sum
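The distance computation above uses the expansion (x - c)^2 = x^2 - 2cx + c^2 to avoid materializing pairwise differences; a small self-check of the identity:

import numpy as np

x = np.random.rand(5, 3)  # 5 points
c = np.random.rand(2, 3)  # 2 centroids
fast = (x ** 2).sum(1)[:, None] - 2 * (x @ c.T) + (c ** 2).sum(1)[None, :]
ref = ((x[:, None, :] - c[None, :, :]) ** 2).sum(-1)
print(np.allclose(fast, ref))  # True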
Example #32
def sanitize_imfreq(origin, dest):

    dest.create_dataset(name="data",
                        data=origin["Data"].value.transpose((2, 0, 1, 3)))
    dest["data"].attrs.create('__complex__', "1")

    dest.create_group(name="indices")
    exec("indL = %s" % origin["IndicesL"].value)
    exec("indR = %s" % origin["IndicesR"].value)
    indL = [str(i) for i in indL]
    indR = [str(i) for i in indR]
    dest["indices"].create_dataset(name="left", data=indL)
    dest["indices"].create_dataset(name="right", data=indR)

    dest.create_group(name="singularity")
    dest["singularity"].create_dataset(name="data",
                                       data=origin["Tail"]["array"].value)
    dest["singularity"]["data"].attrs.create('__complex__', "1")
    dest["singularity"].create_dataset(
        name="omin", data=origin["Tail"]["OrderMinMIN"].value)
    mask = numpy.zeros(dest["singularity"]["data"].shape[0:2], numpy.int_)
    mask.fill(origin["Tail"]["OrderMax"].value)
    dest["singularity"].create_dataset(name="mask", data=mask)

    dest.create_group(name="mesh")
    beta = origin["Mesh"]["Beta"].value
    pi = numpy.arccos(-1)
    size = numpy.uint(len(origin["Mesh"]["array"].value))
    dest["mesh"].create_dataset(name="kind", data=2)
    dest["mesh"].create_dataset(name="min", data=pi / beta)
    dest["mesh"].create_dataset(name="max", data=(2 * size + 1) * pi / beta)
    dest["mesh"].create_dataset(name="size", data=size)
    dest["mesh"].create_group(name="domain")
    dest["mesh"]["domain"].create_dataset(name="beta", data=beta)
    dest["mesh"]["domain"].create_dataset(name="statistic",
                                          data={
                                              "Fermion": "F",
                                              "Boson": "B"
                                          }[origin["Mesh"]["Statistic"].value])

    return ['Data', 'IndicesL', 'IndicesR', 'Mesh', 'Name', 'Note', 'Tail']
Example #33
def _get_component_slice(components):
    """
    Check the components object to determine how to use it to slice the dataset

    Parameters
    ----------
    components : {int, iterable of ints, slice, or None}
        Input Options
        integer: Components less than the input will be kept
        length 2 iterable of integers: Integers define start and stop of component slice to retain
        other iterable of integers or slice: Selection of component indices to retain
        None: All components will be used
    Returns
    -------
    comp_slice : slice or list of ints
        Slice or list of indices specifying which components should be kept

    """

    comp_slice = slice(None)

    if isinstance(components, int):
        # Component is integer
        comp_slice = slice(0, components)
    elif hasattr(components, '__iter__') and not isinstance(components, dict):
        # Component is array, list, or tuple
        if len(components) == 2:
            # If only 2 numbers are given, use them as the start and stop of a slice
            comp_slice = slice(int(components[0]), int(components[1]))
        else:
            #Convert components to an unsigned integer array
            comp_slice = np.uint(np.round(components)).tolist()
    elif isinstance(components, slice):
        # Components is already a slice
        comp_slice = components
    elif components is not None:
        raise TypeError(
            'Unsupported component type supplied to clean_and_build.  Allowed types are integer, numpy array, list, tuple, and slice.'
        )

    return comp_slice
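A quick tour of the branches (assuming _get_component_slice above is in scope):

print(_get_component_slice(4))          # slice(0, 4): keep the first four components
print(_get_component_slice((2, 7)))     # slice(2, 7): start/stop pair
print(_get_component_slice([0, 2, 5]))  # [0, 2, 5]: explicit indices, rounded and cast
print(_get_component_slice(None))       # slice(None, None, None): keep everything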
Example #34
def gaussianFilter(image, size=7, sigma=1, gray=True):
	"""
	功能:对图像进行高斯滤波
	:param image: 输入图像
	:param size: 高斯核(高斯模板)的尺寸,默认值为7
	:param sigma: 生成高斯核(高斯模板)的标准差,默认值为1
	:param gray: 标记输入图片是否为灰度图像
	:return: 高斯滤波后的图像
	"""
	if not gray:
		imageNew = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
	else:
		imageNew = image.copy()
	height, width = imageNew.shape[0], imageNew.shape[1]
	exband = np.uint((size - 1) / 2)
	imageExband = cv.copyMakeBorder(imageNew, exband, exband, exband, exband, cv.BORDER_REPLICATE)
	gCore = getGaussianCore(size, sigma)
	for i in range(height):
		for j in range(width):
			imageNew[i, j] = np.sum(gCore * imageExband[i:i+size,j:j+size])
	return imageNew
Example #35
def sanitize_legendre(origin, dest):

    dest.create_dataset(name="data", data=origin["Data"].value.transpose((2,0,1,3)))

    dest.create_group(name="indices")
    exec("indL = %s"%origin["IndicesL"].value)
    exec("indR = %s"%origin["IndicesR"].value)
    indL = [ str(i) for i in indL ]
    indR = [ str(i) for i in indR ]
    dest["indices"].create_dataset(name="left", data=indL)
    dest["indices"].create_dataset(name="right", data=indR)

    dest.create_group(name="mesh")
    beta = origin["Mesh"]["Beta"].value
    size = numpy.uint(len(origin["Mesh"]["array"].value))
    dest["mesh"].create_group(name="domain")
    dest["mesh"]["domain"].create_dataset(name="beta", data=beta)
    dest["mesh"]["domain"].create_dataset(name="n_max", data=size)
    dest["mesh"]["domain"].create_dataset(name="statistic", data={"Fermion":"F", "Boson":"B"}[origin["Mesh"]["Statistic"].value] )

    return ['Data', 'IndicesL', 'IndicesR', 'Mesh', 'Name', 'Note', 'Tail']
Example #36
    def __init__(
        self,
        size=np.uint(100),
        susceptible_share=0.69,
        infected_share=0.01,
        infection_prob=0.2,
        remove_prob=0.6,
        lethality=0.03,
    ):
        self.__data = None
        self.__seed = None
        self.seed()

        self.set_size(size)
        self.set_susceptible_share(susceptible_share)
        self.set_infected_share(infected_share)
        self.set_infection_prob(infection_prob)
        self.set_remove_prob(remove_prob)
        self.set_lethality(lethality)

        self.reset()
Example #37
def generate_gray(img_path, Fixed_RESHAPE_SIZE, mode='png'):

    if mode == 'dcm':
        ds = pydicom.dcmread(img_path)
        # ds.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian
        img = np.uint(ds.pixel_array)
        high = np.max(img)  # find the maximum value
        low = np.min(img)  # find the minimum value
        # call the converter function to start the conversion
        img = convert_from_dicom_to_jpg(img, low, high)

        img = np.array(
            Image.fromarray(np.uint8(img)).resize(
                (Fixed_RESHAPE_SIZE, Fixed_RESHAPE_SIZE), Image.LANCZOS))  # LANCZOS is the former ANTIALIAS
    else:
        img = np.asarray(
            np.uint8(
                Image.open(img_path).convert("L").resize(
                    (Fixed_RESHAPE_SIZE, Fixed_RESHAPE_SIZE))))

    return img
Example #38
    def run(self, x):
        labels_key = self.keys_correspondences["labels_key"]
        features_key = self.keys_correspondences["features_key"]
        access_ids_key = self.keys_correspondences["access_ids_key"]
        output_type_key = self.keys_correspondences["output_type_key"]

        labels = x[labels_key]
        ind_attack = np.array([])
        for i in self._index_filter:
            ind_attack = np.concatenate((ind_attack, np.where(labels == i)[0]),
                                        axis=0)

        ind_gen = np.where(labels == 0)[0]
        indices = np.uint(np.concatenate((ind_gen, ind_attack), axis=0))

        # for k, v in X.items(): X[k] = X[k][indices]
        x[features_key] = x[features_key][indices]
        x[access_ids_key] = x[access_ids_key][indices]
        x[labels_key] = x[labels_key][indices]
        x[output_type_key] = ProcessorOutputType.LIKELIHOOD
        return x
Example #39
def remove_small_lesions(seg, min_size=10, verbose=False):

    seg = np.uint(seg > 0)

    c_filter = sitk.ConnectedComponentImageFilter()
    c_filter.FullyConnectedOn()

    seg = sitk.GetImageFromArray(seg)
    seg = c_filter.Execute(seg)
    seg = sitk.GetArrayFromImage(seg)

    n_lesion = seg.max()

    for i in range(1, n_lesion + 1):

        if np.sum(seg == i) < min_size:
            if verbose:
                print('Found one, ', np.sum(seg == i))
            seg[seg == i] = 0

    return (seg > 0).astype(np.int8)
Example #40
def filterColor():
    cap = cv2.VideoCapture(0)

    while True:
        _, frame = cap.read()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        lower_red = np.array([150, 150, 150])
        upper_red = np.array([250, 250, 255])

        mask = cv2.inRange(hsv, lower_red, upper_red)
        res = cv2.bitwise_and(frame, frame, mask=mask)

        #Add blurring
        # kernel = np.ones((15, 15), np.float32) / 225
        # smoothed = cv2.filter2D(res, -1, kernel)
        # gaussian = cv2.GaussianBlur(res, (15, 15), 0)
        medianBlur = cv2.medianBlur(res, 15)

        #Add morphological transformation
        kernel = np.ones((5, 5), np.uint8)  # np.uint(8) would be a scalar 8, not a dtype
        erosion = cv2.erode(mask, kernel, iterations=1)
        dilation = cv2.dilate(mask, kernel, iterations=1)
        # cv2.imshow('smoothed', medianBlur)
        # cv2.imshow('erosion', erosion)
        # cv2.imshow('dilation', dilation)

        #opening - removing false positives (in the background)
        opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        cv2.imshow('opening', opening)

        #closing - removing false negatives (in the object)
        closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
        cv2.imshow('closing', closing)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cv2.destroyAllWindows()
    cap.release()
Example #41
def averageFilter(image, size, gray=True):
	"""
	:param image: input image
	:param size: filter size
	:param gray: whether the input image is grayscale
	:return: the mean-filtered image

	"""
	height, width = image.shape[0], image.shape[1]
	exband = np.uint((size - 1) / 2)
	imageExband = cv.copyMakeBorder(image, exband, exband, exband, exband, cv.BORDER_REPLICATE)  # pad the image borders
	imageNew = image.copy()
	if gray:
		for i in range(height):
			for j in range(width):
				imageNew[i, j] = np.mean(imageExband[i:i+size, j:j+size])
	else:
		for ch in range(3):
			for i in range(height):
				for j in range(width):
					imageNew[i, j, ch] = np.mean(imageExband[i:i+size, j:j+size, ch])
	return imageNew
Example #42
def least_path(neighbors, start, end, superpixelColor):

    visited = []
    unvisited = list(range(np.shape(neighbors)[0]))

    path_cost = np.full((np.size(unvisited), 2), np.inf)

    # start = 0
    path_cost[start, 0] = 0

    while np.size(unvisited) != 0:

        current_vertex = unvisited[np.argmin(path_cost[unvisited, 0])]

        if current_vertex == end:
            break

        unvisited_neighbour = np.intersect1d(neighbors[current_vertex],
                                             unvisited)

        for i in unvisited_neighbour:
            dist = compute_color_distance(superpixelColor[current_vertex],
                                          superpixelColor[i])
            if dist + path_cost[current_vertex, 0] < path_cost[i, 0]:
                path_cost[i] = (dist +
                                path_cost[current_vertex, 0]), current_vertex

        visited.append(current_vertex)
        unvisited.remove(current_vertex)

    # backtrack
    backtrack = []
    backtrack.append(current_vertex)
    while current_vertex != start:

        current_vertex = np.uint(path_cost[current_vertex][1])
        backtrack.append(current_vertex)

    return backtrack
Example #43
    def __init__(self):
        NT = namedtuple('NT', tuple('abc'))

        self.values = [
                np.longlong(-1), np.int_(-1), np.intc(-1), np.short(-1), np.byte(-1),
                np.ubyte(1), np.ushort(1), np.uintc(1), np.uint(1), np.ulonglong(1),
                np.half(1.0), np.single(1.0), np.float_(1.0), np.longfloat(1.0),
                np.csingle(1.0j), np.complex_(1.0j), np.clongfloat(1.0j),
                np.bool_(0), np.str_('1'), np.unicode_('1'), np.void(1),
                np.object(), np.datetime64('NaT'), np.timedelta64('NaT'), np.nan,
                12, 12.0, True, None, float('NaN'), object(), (1, 2, 3),
                NT(1, 2, 3), datetime.date(2020, 12, 31), datetime.timedelta(14),
        ]

        # Datetime & Timedelta
        for precision in ['ns', 'us', 'ms', 's', 'm', 'h', 'D', 'M', 'Y']:
            for kind, ctor in (('m', np.timedelta64), ('M', np.datetime64)):
                self.values.append(ctor(12, precision))

        for size in (1, 8, 16, 32, 64, 128, 256, 512):
            self.values.append(bytes(size))
            self.values.append('x' * size)
Example #44
    def test_typecast(self):
        sweep_points = [1, 2]
        num_circuits = 1
        num_qubits = 2
        config_fn = os.path.join(curdir, 'test_config_default.json')
        platf = ql.Platform("starmon", config_fn)
        p = ql.Program('test_bug', platf, num_qubits)
        p.set_sweep_points(sweep_points)
        k = ql.Kernel('kernel1', platf, num_qubits)

        qubit = 1

        k.identity(int(qubit))  # np.int was simply the builtin int alias
        k.identity(np.int32(qubit))
        k.identity(np.int64(qubit))

        k.identity(np.uint(qubit))
        k.identity(np.uint32(qubit))
        k.identity(np.uint64(qubit))

        # add the kernel to the program
        p.add_kernel(k)
Example #45
def pop(x, precisions):
    head_, tail_ = x
    head_ = np.uint64(head_)
    # Modulo as bitwise and
    interval_starts = head_ & np.uint((1 << precisions) - 1)
    def pop(starts, freqs):
        head = freqs * (head_ >> np.uint(precisions)) + interval_starts - starts
        idxs = head < RANS_L
        n = np.sum(idxs)
        if n > 0:
            tail, new_head = stack_slice(tail_, n)
            # Move popped integers into lower-order
            # bits of head
            try:
                head[idxs] = (head[idxs] << np.uint(32)) | new_head
            except TypeError:
                head = (head << np.uint(32)) | new_head
        else:
            tail = tail_
        return head, tail
    
    return interval_starts, pop
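The "modulo as bitwise and" comment holds whenever the modulus is a power of two; a one-line sanity check:

import numpy as np

head = np.uint64(123456789)
prec = 16
print(head & np.uint64((1 << prec) - 1), head % np.uint64(1 << prec))  # identical for power-of-two moduli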
Example #46
    def run(self):
        # Run gradually on all scale factors (if only one jump then this loop only happens once)
        for self.sf_ind, (sf, self.kernel) in enumerate(
                zip(self.conf.scale_factors, self.kernels)):
            # Relative_sf (used when base change is enabled. this is when input is the output of some previous scale)
            sf = [sf, sf] if np.isscalar(sf) else sf
            self.sf = np.array(sf) / np.array(self.base_sf)

            self.output_shape = np.uint(
                np.ceil(np.array(self.input.shape[0:2]) * sf))

            # Initialize network
            self.init_sess(init_weights=self.conf.init_net_for_each_sf)

            # Train the network
            self.train()

            # Use augmented outputs and back projection to enhance result. Also save the result.
            post_processed_output = self.final_test()

            # Keep the results for the next scale factors SR to use as dataset
            self.hr_fathers_sources.append(post_processed_output)

            # append a corresponding map loss
            self.loss_map_sources.append(
                create_loss_map(im=post_processed_output)
            ) if self.conf.grad_based_loss_map else self.loss_map_sources.append(
                np.ones_like(post_processed_output))

            # In some cases, the current output becomes the new input. If indicated and if this is the right scale to
            # become the new base input. all of these conditions are checked inside the function.
            self.base_change()

        # Return the final post processed output.
        # noinspection PyUnboundLocalVariable
        print(".....****....")
        print(post_processed_output.shape)
        exit(0)
        return post_processed_output
Example #47
    def signalLight(self):
        gray2 = cv2.cvtColor(self.cam_img, cv2.COLOR_BGR2GRAY)  # IMREAD_GRAYSCALE is an imread flag, not a conversion code
        gray2 = cv2.medianBlur(gray2, 5)
        # self.color = cv2.cvtColor(gray2, cv2.COLOR_GRAY2BGR)
        hsv2 = cv2.cvtColor(self.cam_img, cv2.COLOR_BGR2HSV)

        circles = cv2.HoughCircles(gray2,
                                   cv2.HOUGH_GRADIENT,
                                   1,
                                   20,
                                   param1=80,
                                   param2=60,
                                   minRadius=0,
                                   maxRadius=0)

        if circles is None:  # HoughCircles returns None when nothing is found
            return
        circles = np.uint(np.around(circles))
        for c in circles[0, :]:
            center = (c[0], c[1])
            radius = c[2]

            cv2.circle(self.cam_img, center, radius, (0, 255, 0), 2)
            print(self.cam_img[center[1]][center[0]])
Example #48
 def export_image(self, name):
     red = Color("red")
     blue = Color("blue")
     white = Color("white")
     black = Color("black")
     gold = Color("gold")
     rgb_gold = []
     for part in gold.rgb:
         part = part * 255
         rgb_gold.append(part)
     rgb_black = []
     for part in black.rgb:
         part = part * 255
         rgb_black.append(part)
     rgb_white = []
     for part in white.rgb:
         part = part * 255
         rgb_white.append(part)
     colours = list(red.range_to(blue, int(self.grains)))
     image = np.zeros([self.space.shape[1], self.space.shape[0], 3],
                      dtype=np.uint8)  # np.uint(8) would be a scalar, not a dtype
     for grain in range(self.grains + 1):
         rgb = []
         for part in colours[grain - 1].rgb:
             part = part * 255
             rgb.append(part)
         for cell in self.space.flat:
             if cell.state == grain:
                 x, y = cell.find_id()
                 image[x, y] = rgb
             if cell.state == 999:
                 x, y = cell.find_id()
                 image[x, y] = rgb_black
             if cell.state == 500:
                 x, y = cell.find_id()
                 image[x, y] = rgb_gold
     img = Image.fromarray(image.astype('uint8'))
     img = img.resize((self.space.shape[1] * 3, self.space.shape[0] * 3))
     img.save('./static/temp/' + str(name) + '.png')
Example #49
def sanitize_imtime(origin, dest):

    dest.create_dataset(name="data", data=origin["Data"].value.transpose((2,0,1)))

    dest.create_group(name="indices")
    exec("indL = %s"%origin["IndicesL"].value)
    exec("indR = %s"%origin["IndicesR"].value)
    indL = [ str(i) for i in indL ]
    indR = [ str(i) for i in indR ]
    dest["indices"].create_dataset(name="left", data=indL)
    dest["indices"].create_dataset(name="right", data=indR)

    dest.create_group(name="singularity")
    dest["singularity"].create_dataset(name="data", data=origin["Tail"]["array"].value)
    dest["singularity"]["data"].attrs.create('__complex__', "1")
    dest["singularity"].create_dataset(name="omin", data=origin["Tail"]["OrderMinMIN"].value)
    mask = numpy.zeros( dest["singularity"]["data"].shape[0:2], numpy.int_ )
    mask.fill(origin["Tail"]["OrderMax"].value)
    dest["singularity"].create_dataset(name="mask", data=mask)

    dest.create_group(name="mesh")
    beta = origin["Mesh"]["Beta"].value
    size = numpy.uint(len(origin["Mesh"]["array"].value))
    min_t = origin["Mesh"]["array"].value[0]
    if min_t > 1e-10:
      kind = 0
      assert(abs(min_t - 0.5*beta/size) < 1e-10)
    else:
      kind = 2
    dest["mesh"].create_dataset(name="kind", data=kind)
    dest["mesh"].create_dataset(name="min", data=0.0)
    dest["mesh"].create_dataset(name="max", data=beta)
    dest["mesh"].create_dataset(name="size", data=size)
    dest["mesh"].create_group(name="domain")
    dest["mesh"]["domain"].create_dataset(name="beta", data=beta)
    dest["mesh"]["domain"].create_dataset(name="statistic", data={"Fermion":"F", "Boson":"B"}[origin["Mesh"]["Statistic"].value] )

    return ['Data', 'IndicesL', 'IndicesR', 'Mesh', 'Name', 'Note', 'Tail']
Example #50
def put_var_in_wfkdata(wfkdata, varname, varobj, varndim, varshape, vartype):
    if varndim == 0:
        if vartype == N.bool:
            wfkdata.put(varname, N.bool(varobj.getValue()))
        elif vartype == N.int:
            wfkdata.put(varname, N.int(varobj.getValue()))
        elif vartype == N.uint:
            wfkdata.put(varname, N.uint(varobj.getValue()))
        elif vartype == N.float:
            wfkdata.put(varname, N.float(varobj.getValue()))
        elif vartype == N.complex:
            wfkdata.put(varname, N.complex(varobj.getValue()))
        elif vartype == str:
            wfkdata.put(varname, str(varobj.tostring()))
        else:
            error_message = 'Variable "%s" with type "%s" cannot be put in a wavefunction data' % (
                varname, vartype)
            basic_utils.error_exit(error_message)
    else:
        if vartype == N.bool or vartype == N.int or vartype == N.uint or vartype == N.float or vartype == N.complex:
            newarray = N.reshape(N.array(varobj, vartype), varshape)
            #            newarray = N.array(varobj,vartype)
            wfkdata.put(varname, newarray)
        elif vartype == str:
            newstring = str(varobj[:].tostring())
            wfkdata.put(varname, newstring)
        else:
            error_message = 'Variable "%s" with type "%s" and shape "%s" cannot be put in a wavefunction data' % (
                varname, vartype, varshape)
            basic_utils.error_exit(error_message)
    if variables_flags[varname] and varname in variables_attributes_flags:
        for attrname in varobj.ncattrs():
            if attrname in variables_attributes_flags[varname]:
                if variables_attributes_flags[varname][attrname]:
                    attr = varobj.getncattr(attrname)
                    attrtype = get_type(attr)
                    put_attr_in_wfkdata(wfkdata, attrname, attr, attrtype,
                                        varname)
Example #51
def Convolution_1D(img, fil, border=cv2.BORDER_CONSTANT, padding=True):
    # original (pre-padding) image size
    old_height, old_width = img.shape

    # Matrix transpose and get the first row (vector)
    fil = fil.T[0]
    size_mask = len(fil)

    # Add Padding
    if padding:
        padding = np.uint8((size_mask + 1) / 2)
        img = cv2.copyMakeBorder(img, padding - 1, padding - 1, padding - 1,
                                 padding - 1, border)

    height, width = img.shape
    # mask of 000000...000000m1m2m3...mn0000000...00000000
    num_zeros = width - padding
    size = 2 * num_zeros + size_mask

    mask = np.zeros((1, size), dtype=float)

    mask[0, num_zeros:num_zeros + size_mask] = fil

    result = np.zeros((height, width), dtype=float)

    # Convolute each row
    for j in range(0, height):
        for i in range(0, width):
            offset = num_zeros + np.uint(size_mask * .5) - i
            # Apply Mask x1 * v1 + x2 * v2 + ... (The same as (R * M^t)[0])
            result[j, i] = np.dot(mask[0, offset:offset + width], img[j])

    # Remove padding
    if padding:
        result = result[padding - 1:padding - 1 + old_height,
                        padding - 1:padding - 1 + old_width]

    return result
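A usage sketch for Convolution_1D (assumes OpenCV is installed and the function above is in scope; the filter is passed as a column vector, matching the transpose on entry):

import numpy as np
import cv2

img = np.arange(25, dtype=np.float64).reshape(5, 5)
fil = np.array([[1.0], [2.0], [1.0]]) / 4.0  # 1-D binomial smoothing kernel
out = Convolution_1D(img, fil)
print(out.shape)  # (5, 5): the padding added internally is stripped again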
Example #52
class SCGP_GW_gun_position_report_INS(UDPAdapter):
    # 493092871 SCGP_CS_gun_position_report_INS 226.1.3.228 50225 Multicast UDP
    def getMsgIdentifier(self):
        return 493092871

    def getMsgSender(self):
        from config import SCGP
        return SCGP

    ip = "226.1.3.228"
    port = 50225
    formato_payload = "fffffI"
    # DATA
    azimuth = 0.0  # 16	4	1	Float	deg	[0..360]	Azimuth angle. The origin is the mechanical centre of the gun (DCS)
    elevation = 0.0  # 20	4	1	Float	deg	[-90..90]	Elevation angle. The origin is the mechanical centre of the gun (DCS)
    azimuth_cursor = 0.0  # 24	4	1	Float	deg	[0..360]	Azimuth angle. The origin is the mechanical centre of the gun (DCS)
    elevation_cursor = 0.0  # 28	4	1	Float	deg	[-90..90]	Elevation angle. The origin is the mechanical centre of the gun (DCS)
    time_of_flight = 0.0  # 32	4	1	Float	s	[0..30]	Time of flight of the projectile.
    range_future = np.uint(
        0)  # 36	4	1	UInt	m	[0..20000]	Range for the future-point computation

    def getListAttribute(self):
        return [
            "azimuth", "elevation", "azimuth_cursor", "elevation_cursor",
            "time_of_flight", "range_future"
        ]

    def process(self):
        pass

    @property
    def payload(self):
        return self.azimuth, \
               self.elevation, \
               self.azimuth_cursor, \
               self.elevation_cursor, \
               self.time_of_flight, \
               self.range_future
Example #53
def pre_process_isotropic(img_nii, seg_nii, use_isotropic, id, **kwargs):
    # Pre-process the data
    # Normalize the intensity of the images and also do some re-sample.
    #
    # If use_isotropic is True, we will resize all samples to the same resolution;
    # otherwise, we will only resize samples 4 and 5 (which have much lower
    # resolution than the other samples)

    if len(kwargs) < 4:
        id = 1

    if use_isotropic:
        scale = np.array(img_nii.shape) / np.array([0.80, 0.7588, 0.7588])
        new_resize = np.round(np.array(img_nii.shape) * scale).astype(int)  # built-in round() does not accept arrays
        img = imresize3d(img_nii, [], new_resize, 'reflect')
        seg = imresize3d(seg_nii, [], new_resize, 'reflect')
    else:
        if id == 4 or id == 5:
            # resize img in up an down plane
            img = np.transpose(img_nii, [2, 1, 0])
            img = resize(img, 1.5, mode='reflect')
            img = np.transpose(img, [2, 1, 0])
            seg = np.transpose(seg_nii, [2, 1, 0])
            seg = resize(seg, 1.5, mode='reflect')
            seg = np.transpose(seg, [2, 1, 0])
        else:
            img = img_nii
            seg = seg_nii

    # Normalize the intensity of images
    mask = img > 0
    mean_value = np.mean(img[mask])
    std_value = np.std(img[mask])
    img = (img - mean_value) / std_value
    img = np.float32(img)
    seg = np.uint(seg)

    return img, seg
Example #54
def process(img, region):
    """
    Extract plate regions    
    :param img: input image
    :param region: region data
    """

    row, col = cfg.SCALE_DIM
    height, width, _ = img.shape

    region = np.uint(region)

    x1 = region[0]
    x2 = region[1]
    y1 = region[2]
    y2 = region[3]

    x1 = x1 * height // row
    x2 = x2 * height // row
    y1 = y1 * width // col
    y2 = y2 * width // col

    return img[x1:x2, y1:y2]
Example #55
def mpi_eye(size):
    """
    Produces an identity (eye) matrix of shape (size, size),
    distributed over the various running MPI processes
    
    Parameters
    ----------
    size : integer
        distributed matrix size 
        
    Returns
    -------
    numpy.ndarray, double data type
    distributed eye matrix
    """
    log.debug('@ mpi_helper::mpi_eye')
    local_row_begin, local_row_end = mpi_arrange(size)
    local_matrix = np.zeros((local_row_end - local_row_begin, size),
                            dtype=np.float64)
    for i in range(local_row_end - local_row_begin):
        eye_pos = local_row_begin + np.uint(i)
        local_matrix[i, eye_pos] = 1.0
    return local_matrix
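mpi_arrange is not defined in this example; a plausible stand-in that splits the rows as evenly as possible across the MPI ranks might look like the sketch below (the name mpi_arrange_sketch and the exact partitioning rule are assumptions, not the library's actual implementation):

import numpy as np
from mpi4py import MPI

def mpi_arrange_sketch(size):
    # Hypothetical stand-in for mpi_arrange: divide `size` rows across the
    # communicator; the first `rem` ranks each take one extra row.
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    base, rem = divmod(int(size), comm.Get_size())
    begin = rank * base + min(rank, rem)
    end = begin + base + (1 if rank < rem else 0)
    return np.uint(begin), np.uint(end)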
Example #56
0
 def setUpContainer(self):
     """ Return the test SweepTable to read/write """
     self.device = Device(name='device_name')
     self.elec = IntracellularElectrode(
         name="elec0",
         slice='tissue slice',
         resistance='something measured in ohms',
         seal='sealing method',
         description='a fake electrode object',
         location='Springfield Elementary School',
         filtering='a meaningless free-form text field',
         initial_access_resistance='I guess this changes',
         device=self.device)
     self.pcs = PatchClampSeries(name="pcs",
                                 data=[1, 2, 3, 4, 5],
                                 unit='A',
                                 starting_time=123.6,
                                 rate=10e3,
                                 electrode=self.elec,
                                 gain=0.126,
                                 stimulus_description="gotcha ya!",
                                 sweep_number=np.uint(4711))
     return SweepTable(name='sweep_table')
Example #57
0
from functools import reduce

def threshold(imageArray):
    balanceAr = []  # balance array
    newAr = imageArray.copy()  # work on a copy of imageArray, named newAr

    for eachRow in imageArray:  # every row
        for eachPix in eachRow:  # every pixel (element) in the row
            # print(eachPix)  # test-print each row
            # time.sleep(3)  # delay 3 seconds

            # the fourth value in eachPix (alpha) is not needed
            avgNum = reduce(lambda x, y: x + y, np.uint(eachPix[:3])) / len(np.uint(eachPix[:3]))
            balanceAr.append(avgNum)

    # average pixel value of the whole image: sum of all pixel values (0-255) / number of pixels
    balance = reduce(lambda x, y: x + y, balanceAr) / len(balanceAr)

    # print(f'balance: {balance}')

    for eachRow in newAr:
        for eachPix in eachRow:
            # if this pixel's average value is above the global average (i.e. this pixel is brighter)
            thisPixel = reduce(lambda x, y: x + y, np.uint(eachPix[:3])) / len(np.uint(eachPix[:3]))

            if thisPixel > np.uint(balance):

                # print(f'thisPixel: {thisPixel} is bigger than balance: {balance}')

                # turn this pixel white
                eachPix[0] = 255  # r
                eachPix[1] = 255  # g
                eachPix[2] = 255  # b
                eachPix[3] = 255  # alpha

            else:
                # print(f'thisPixel: {thisPixel} is smaller than balance: {balance}')
                # turn this pixel black
                eachPix[0] = 0  # r
                eachPix[1] = 0  # g
                eachPix[2] = 0  # b
                eachPix[3] = 255  # alpha

            # time.sleep(3)
    return newAr  # return the converted array
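The nested Python loops above visit every pixel at interpreter speed; the same brighter-than-average-to-white, darker-to-black rule can be written as a few vectorized numpy operations. A sketch of an equivalent, assuming an RGBA uint8 input as the alpha handling above implies (threshold_vectorized is a hypothetical name):

import numpy as np

def threshold_vectorized(image_array):
    # Per-pixel mean of the RGB channels, compared against the global mean
    # brightness of the whole image (the "balance" in the loop version).
    rgb_mean = image_array[..., :3].astype(np.float64).mean(axis=-1)
    balance = rgb_mean.mean()
    out = image_array.copy()
    out[..., :3] = np.where(rgb_mean[..., None] > balance, 255, 0)
    out[..., 3] = 255  # alpha is forced opaque in both branches
    return out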
Example #58
0
def show_images_64_64(images, final=None, post_process=None):
    #images = np.reshape(images, [images.shape[0], -1])  # images reshape to (batch_size, D)
    sqrtn = int(np.ceil(np.sqrt(images.shape[0])))
    #sqrtimg = int(np.ceil(np.sqrt(images.shape[1])))

    fig = plt.figure(figsize=(sqrtn, sqrtn))
    gs = gridspec.GridSpec(sqrtn, sqrtn)
    gs.update(wspace=0.05, hspace=0.05)

    for i, img in enumerate(images):
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        if post_process is not None:
            plt.imshow(np.uint8(post_process(img)))
        else:
            plt.imshow(np.uint(img))
        if final is not None:
            plt.savefig(final + '.jpg')
    plt.show()
    return
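A hypothetical call, assuming batch is an (N, 64, 64) or (N, 64, 64, 3) array from a generator that outputs values in [-1, 1], which need rescaling for display:

show_images_64_64(batch, final='samples',
                  post_process=lambda x: (x + 1.0) * 127.5)  # assumed [-1, 1] -> [0, 255] mapping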
Example #59
0
    def predict(self, data_dic, data_var=None, data_of=None, data_of_mag=None):
        """
        Runs the prediction on the data.
        :param data_dic: primary input data
        :param data_var: second model input, used by the multi-input models
        :param data_of: third model input, used by the multi-input models
        :param data_of_mag: fourth model input
        :return: the predicted values
        """
        if self.classifier_name == 'DUALINPUTUNET':
            temp = self.trained_model.predict([data_dic, data_var])
        elif self.classifier_name == 'TRIPPLEINPUTUNET':
            temp = self.trained_model.predict([data_dic, data_var, data_of])
        elif self.classifier_name == 'FOURINPUTUNET':
            # temp = self.trained_model.predict([data_dic, data_var, data_of, data_of_mag])
            temp = self.trained_model.predict([data_var, data_of_mag])
        else:
            temp = self.trained_model.predict(data_dic, batch_size=5)

        # print(type(temp))
        # print(temp.shape)
        # print(np.max(temp), np.mean(temp), np.min(temp))
        temp = (temp >= 0.5).astype(int)  # binarize each probability map
        temp = np.sum(temp, axis=0)       # count positive votes along axis 0
        temp = (temp > 3).astype(int)     # majority vote: keep pixels with more than 3 votes
        # print(temp.shape, np.max(temp), np.min(temp))

        return np.uint(temp)
        # return np.uint(self.trained_model.predict(data_dic)[0])

        #
        # # X_test = X_test.reshape(X_test.shape + (1,))
        # ret={}
        # for item in data_dic:
        #
        #     temp =np.array([data_dic[item].reshape(data_dic[item].shape + (1,))])
        #     ret[item] =np.uint8( self.trained_model.predict(temp)[0].squeeze(axis=2) *255)
        # # ret = [{x: self.trained_model.predict()[0]} for x in data_dic]
        #
        # return ret
Example #60
0
class TestNumpyJSONEncoder(unittest.TestCase):
    @parameterized.expand(
        [(numpy.bool_(1), True), (numpy.bool8(1), True), (numpy.byte(1), 1),
         (numpy.int8(1), 1), (numpy.ubyte(1), 1), (numpy.uint8(1), 1),
         (numpy.short(1), 1), (numpy.int16(1), 1), (numpy.ushort(1), 1),
         (numpy.uint16(1), 1), (numpy.intc(1), 1), (numpy.int32(1), 1),
         (numpy.uintc(1), 1), (numpy.uint32(1), 1), (numpy.int_(1), 1),
         (numpy.int32(1), 1), (numpy.uint(1), 1), (numpy.uint32(1), 1),
         (numpy.longlong(1), 1), (numpy.int64(1), 1), (numpy.ulonglong(1), 1),
         (numpy.uint64(1), 1), (numpy.half(1.0), 1.0),
         (numpy.float16(1.0), 1.0), (numpy.single(1.0), 1.0),
         (numpy.float32(1.0), 1.0), (numpy.double(1.0), 1.0),
         (numpy.float64(1.0), 1.0), (numpy.longdouble(1.0), 1.0)] + ([
             (numpy.float128(1.0), 1.0)  # unavailable on windows
         ] if hasattr(numpy, 'float128') else []))
    def test_numpy_primary_type_encode(self, np_val, py_val):
        self.assertEqual(json.dumps(py_val),
                         json.dumps(np_val, cls=NumpyEncoder))

    @parameterized.expand([
        (numpy.array([1, 2, 3], dtype=numpy.int_), [1, 2, 3]),
        (numpy.array([[1], [2], [3]], dtype=numpy.double), [[1.0], [2.0],
                                                            [3.0]]),
        (numpy.zeros((2, 2), dtype=numpy.bool_), [[False, False],
                                                  [False, False]]),
        (numpy.array([('Rex', 9, 81.0), ('Fido', 3, 27.0)],
                     dtype=[('name', 'U10'), ('age', 'i4'),
                            ('weight', 'f4')]), [['Rex', 9, 81.0],
                                                 ['Fido', 3, 27.0]]),
        (numpy.rec.array([(1, 2., 'Hello'), (2, 3., "World")],
                         dtype=[('foo', 'i4'), ('bar', 'f4'),
                                ('baz', 'U10')]), [[1, 2.0, "Hello"],
                                                   [2, 3.0, "World"]])
    ])
    def test_numpy_array_encode(self, np_val, py_val):
        self.assertEqual(json.dumps(py_val),
                         json.dumps(np_val, cls=NumpyEncoder))
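NumpyEncoder itself is not shown in this example; a minimal encoder that would pass the cases above could look like the following sketch (NumpyEncoderSketch is a hypothetical name, and the real implementation may differ):

import json
import numpy

class NumpyEncoderSketch(json.JSONEncoder):
    """Hypothetical minimal stand-in for the NumpyEncoder under test."""

    def default(self, obj):
        if isinstance(obj, numpy.generic):
            return obj.item()    # numpy scalars -> native Python scalars
        if isinstance(obj, numpy.ndarray):
            return obj.tolist()  # arrays (incl. record arrays) -> nested lists
        return super().default(obj)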