Example No. 1
    def frequency_response(self, N_points, freq_range=(0,200), mirror=False):
        """ Frequency response curve of the sensor

            Args:
                freq_range (tuple): min and max frequency, defining the frequency range of the response
                N_points (int): number of points generated in the curve (length of the response arrays)
                mirror (bool): if True, generates a mirror of the response for negative frequencies. The
                    effective freq_range would be from -1*freq_range[1] to freq_range[1]
            
            Returns:
                list: two arrays, the frequency values and the corresponding intensities,
                    normalized from 0 to 1, with 1 being the response at the resonant frequency.
        """
        if not mirror:
            f_array = np.linspace(*freq_range, N_points)
            freq_response = norm(scale=self.bandwidth/2, loc=self.resonant_freq).pdf(f_array)
            freq_response /= max(freq_response)
        else:
            f_array = np.linspace(*freq_range, N_points//2)
            freq_response = norm(scale=self.bandwidth/2, loc=self.resonant_freq).pdf(f_array)
            freq_response /= max(freq_response)
            mirrored = (np.flip(f_array*-1, 0), np.flip(freq_response, 0))
            f_array = np.hstack((mirrored[0], f_array))
            freq_response = np.hstack((mirrored[1], freq_response))  # keep order consistent with f_array
        return [f_array, freq_response]
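A minimal standalone sketch of the same computation, with hypothetical sensor parameters (resonant_freq and bandwidth here are placeholders, not values from the original project):

import numpy as np
from scipy.stats import norm

resonant_freq, bandwidth = 100.0, 10.0  # hypothetical values

f = np.linspace(0, 200, 500)
response = norm(scale=bandwidth / 2, loc=resonant_freq).pdf(f)
response /= max(response)  # normalized: 1.0 at the resonant frequency

# The peak of the normalized curve sits at the resonant frequency.
assert np.isclose(f[np.argmax(response)], resonant_freq, atol=1.0)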
Example No. 2
def convolution(matrix, filter):
    #flip filter
    filter = np.flip(filter, axis=0)
    filter = np.flip(filter, axis=1)
    print(filter.shape)
    print(matrix.shape)

    if filter.shape[1] % 2 == 0:
        return convolution_1d(matrix, filter)

    #gather shape/dimensions
    rows = matrix.shape[0]
    cols = matrix.shape[1]
    #first zero pad the matrix, but only if filter is of odd length
    start_index = 0 # by default, non padded matrices will start at 0,0
    col_end_index = cols  # same logic for end index
    row_end_index = rows
    if filter.shape[0] % 2 == 1:
        matrix = np.pad(matrix, pad_width=1, mode='constant', constant_values=0)
        start_index = 1 # if zero padded, then start index is going to be 1,1
        col_end_index += 1
        row_end_index += 1
    filtered_matrix = np.zeros((rows, cols))
    for x in range(start_index, row_end_index):  # x indexes rows of the matrix
        for y in range(start_index, col_end_index):  # y indexes columns
            #calculate filter math
            total = 0  # avoid shadowing the built-in sum
            for i in range(filter.shape[0]):
                for j in range(filter.shape[1]):
                    #print('Matrix value: ' + str(matrix[x-i+start_index][y+j-1]))
                    #print('filter value: ' + str(filter[i][j]))
                    total += matrix[x-i+start_index][y+j-1] * filter[i][j]
            filtered_matrix[x-start_index][y-start_index] = total
    return filtered_matrix
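For odd-sized kernels, loops like the one above are usually cross-checked against scipy.signal.convolve2d, which flips the kernel internally and zero-pads with mode='same'; a sketch:

import numpy as np
from scipy.signal import convolve2d

rng = np.random.default_rng(0)
matrix = rng.random((5, 5))
kernel = rng.random((3, 3))

# convolve2d flips the kernel itself, so no manual np.flip is needed.
reference = convolve2d(matrix, kernel, mode='same', boundary='fill', fillvalue=0)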
Example No. 3
    def set_fft_form(self, fft_form, copy=False):
        if copy:
            R=self.copy()
        else:
            R=self

        if self.fft_form==fft_form:
            return R

        fft_form_orig = self.fft_form
        if R.Fourier:
            if fft_form_orig in ['r']:
                nval=np.flip(R.val[...,1:].conj(),axis=-1)
                for ax in self.axes[:-1]:
                    N,F = np.split(nval, [1], axis=ax)
                    nval=np.concatenate((N, np.flip(F, axis=ax)), axis=ax)
                if R.N[-1] % 2 == 0:
                    nval=nval[...,1:]
                val=np.concatenate((R.val,nval), axis=-1)
                R.val=1./np.prod(R.N)*val # fft_form=0
                if fft_form in ['c']:
                    R.val=np.fft.fftshift(R.val, axes=R.axes)
            elif fft_form_orig in ['c']:
                R.val=np.fft.ifftshift(R.val, axes=R.axes) # common for fft_form in [0,'r']
                if fft_form in ['r']:
                    R.val=R.val[...,:self.get_N_real(self.N)[-1]]*np.prod(self.N)
            elif fft_form_orig in [0]:
                if fft_form in ['c']:
                    R.val=np.fft.fftshift(R.val, axes=R.axes)
                else: # if fft_form in ['r']:
                    R.val=R.val[...,:self.get_N_real(self.N)[-1]]*np.prod(self.N)
        R._set_fft(fft_form)
        return R
Example No. 4
def getFullMesh(left_mesh=None, right_mesh=None):
    """
    For a symmetric wing, OAS only keeps and does computation on the left half.
    This script mirrors the OAS mesh and attaches it to the existing mesh to
    obtain the full mesh.

    Parameters
    ----------
    left_mesh[nx,ny,3] or right_mesh : numpy array
        The half mesh to be mirrored.

    Returns
    -------
    full_mesh[nx,2*ny-1,3] : numpy array
        The computed full mesh.
    """
    if left_mesh is None and right_mesh is None:
        raise ValueError("Either the left or right mesh need to be supplied.")
    elif left_mesh is not None and right_mesh is not None:
        raise ValueError("Please only provide either left or right mesh, not both.")
    elif left_mesh is not None:
        right_mesh = np.flip(left_mesh,axis=1).copy()
        right_mesh[:,:,1] *= -1
    else:
        left_mesh = np.flip(right_mesh,axis=1).copy()
        left_mesh[:,:,1] *= -1
    full_mesh = np.concatenate((left_mesh,right_mesh[:,1:,:]),axis=1)
    return full_mesh
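A quick sanity check with a toy 2x3 half mesh, assuming the spanwise (y) coordinate runs from the tip (-2) to the symmetry plane (0):

import numpy as np

nx, ny = 2, 3
left = np.zeros((nx, ny, 3))
left[:, :, 1] = np.linspace(-2.0, 0.0, ny)  # spanwise coordinate

full = getFullMesh(left_mesh=left)
assert full.shape == (nx, 2 * ny - 1, 3)
# The full span now runs symmetrically from -2 to +2.
assert np.allclose(full[0, :, 1], np.linspace(-2.0, 2.0, 2 * ny - 1))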
Example No. 5
    def prepare_buffer(self, buffer):
        if not self.init:
            raise IOError("Hardware need to be initialized for buffer preparation!")

        ACTIVATION_BITS = self.network_json['parameters']['ACTIVATION_BITS']

        if (ACTIVATION_BITS > 8):
            raise IOError("prepare buffer algorithm cannot handle more than 8 activation bits!")

        buffer = buffer.astype(np.uint8)
        dim = buffer.shape[1]
        #change shape to (dim,dim,chan)
        buffer = np.rollaxis(buffer,0,3)
        #transform channels to bits
        buffer = np.unpackbits(buffer, 2)
        #reshape so that the fourth dimension is always one byte in bits
        buffer = buffer.reshape(dim, dim , -1, 8)
        #remove all the zero bits that we do not need, activation bits are left over
        buffer = np.delete(buffer, np.s_[0:(8-ACTIVATION_BITS)], 3)
        #flip the leftover bits to fix endianness
        buffer = np.flip(buffer,3)
        #shape back to (dim, dim, chans in bits)
        buffer = np.reshape(buffer,(dim, dim, -1))
        #pad channels to multiple of 8
        if (buffer.shape[2] % 8):
            buffer = np.pad(buffer, [(0, 0), (0, 0), (0, 8 - (buffer.shape[2] % 8))], mode='constant')

        #fix endianness in 8 bits blocks
        buffer = np.reshape(buffer, (dim, dim, -1, 8))
        buffer = np.flip(buffer, 3)
        #pack bits together
        return np.packbits(buffer.reshape(dim, dim, -1), 2)
Example No. 6
def parse_polynomial(s):
    '''Parse a polynomial string (e.g. 1101^7).
    
    Returns the corresponding polynomial in a Latex-friendly form.'''
    poly_str = ''
    p = s.split('^')
    if len(p) == 2:
        base = np.array([int(y) for y in p[0]])
        power = int(p[1])
        modulus = np.flip(base, axis=0)
    else:
        base = np.array([int(y) for y in s])
        power = 1
        modulus = np.flip(base, axis=0)
    for k in range(len(modulus)):
        if modulus[k] == 1:
            if poly_str != '':
                poly_str += '+'
            poly_str += ' z^{' + str(len(modulus)-k-1) + '}'
        elif modulus[k] not in [0, 1]:
            return ''
    if poly_str == '':
        return ''
    if power != 1:
        poly_str = '(' + poly_str + ')^{' + str(power) + '}'  
            
    return poly_str
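Quick checks of the strings the parser builds (the exact spacing follows from how poly_str is assembled above):

assert parse_polynomial('11') == ' z^{1}+ z^{0}'
assert parse_polynomial('101') == ' z^{2}+ z^{0}'
assert parse_polynomial('11^3') == '( z^{1}+ z^{0})^{3}'
assert parse_polynomial('121') == ''  # digits other than 0/1 are rejected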
Example No. 7
 def testGetNonContiguous(self):
     """Check that we can index on non-contiguous tables"""
     # Make a non-contiguous catalog
     nonContiguous = type(self.catalog)(self.catalog.table)
     for rr in reversed(self.catalog):
         nonContiguous.append(rr)
     num = len(self.catalog)
     # Check assumptions
     self.assertFalse(nonContiguous.isContiguous())  # We managed to produce a non-contiguous catalog
     self.assertEqual(len(set(self.catalog["id"])), num)  # ID values are unique
     # Indexing with boolean array
     select = np.zeros(num, dtype=bool)
     select[1] = True
     self.assertEqual(nonContiguous[np.flip(select, 0)]["id"], self.catalog[select]["id"])
     # Extracting a number column
     column = "a_instFlux"
     array = nonContiguous[column]
     self.assertFloatsEqual(np.flip(array, 0), self.catalog[column])
     with self.assertRaises(ValueError):
         array[1] = 1.2345  # Should be immutable
     # Extracting a flag column
     column = "a_flag"
     array = nonContiguous[column]
     np.testing.assert_equal(np.flip(array, 0), self.catalog[column])
     with self.assertRaises(ValueError):
         array[1] = True  # Should be immutable
Example No. 8
def set_raster_origin(data, coords, direction):
    """ Converts Data and Coordinates Origin

    Parameters
    ----------
    data : :class:`numpy:numpy.ndarray`
        Array of shape (rows, cols) or (bands, rows, cols) containing
        the data values.
    coords : :class:`numpy:numpy.ndarray`
        Array of shape (rows, cols, 2) containing xy-coordinates.
    direction : str
        'lower' or 'upper', direction in which to convert data and coordinates.

    Returns
    -------
    data : :class:`numpy:numpy.ndarray`
        Array of shape (rows, cols) or (bands, rows, cols) containing
        the data values.
    coords : :class:`numpy:numpy.ndarray`
        Array of shape (rows, cols, 2) containing xy-coordinates.
    """
    x_sp, y_sp = coords[1, 1] - coords[0, 0]
    origin = ('lower' if y_sp > 0 else 'upper')
    same = (origin == direction)
    if not same:
        data = np.flip(data, axis=-2)
        coords = np.flip(coords, axis=-3)
        # we need to shift y-coordinate if data and coordinates have the same
        # number of rows and cols (only the ll or ul raster coords are given)
        if data.shape[-2:] == coords.shape[:2]:
            coords += [0, y_sp]

    return data, coords
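A small demonstration, building an 'upper' raster (y decreasing row by row) and converting it to 'lower':

import numpy as np

rows, cols = 3, 4
x, y = np.meshgrid(np.arange(cols), np.arange(rows)[::-1])  # y decreases: 'upper'
coords = np.dstack([x, y]).astype(float)
data = np.arange(rows * cols).reshape(rows, cols)

flipped, fcoords = set_raster_origin(data, coords, 'lower')
assert np.array_equal(flipped, data[::-1])  # rows reversed to a lower origin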
Example No. 9
    def postprocess_buffer(self, buffer):
        if not self.init:
            raise IOError("Hardware need to be initialized for buffer preparation!")

        ACTIVATION_BITS = self.network_json['parameters']['ACTIVATION_BITS']
        if (ACTIVATION_BITS > 8):
            raise IOError("prepare buffer algorithm cannot handle more than 8 activation bits!")

        dim = buffer.shape[0]
        channels = buffer.shape[2]
        ele= math.ceil((channels * ACTIVATION_BITS) / 8)

        #delete entries that we don't need
        buffer = np.delete(buffer, np.s_[ele:], 2)
        #unpack bits
        buffer = np.unpackbits(buffer, 2)
        #fix endianness in 8 bits blocks
        buffer = buffer.reshape(dim,dim,-1,8)
        buffer = np.flip(buffer, 3)
        #delete bits that are left over
        if ((channels * ACTIVATION_BITS) % 8) != 0:
            buffer = np.delete(buffer, np.s_[channels * ACTIVATION_BITS:], 2)
        #reshape so that every channel value gets its own entry
        buffer = buffer.reshape(dim,dim,-1, ACTIVATION_BITS)
        #fix endianness of the single values
        buffer = np.flip(buffer,3)
        #packbits will append zeros at the end, so put them in front
        buffer = np.pad(buffer, [(0,0),(0,0),(0,0),((8-ACTIVATION_BITS),0)], mode='constant')
        #pack the bits to values again
        buffer = np.packbits(buffer,3)
        #fc layers are not interested in the shape
        return buffer.flatten().astype(np.float32)
Example No. 10
def d21(data):
    # Requires Python3
    import numpy as np
    rules = {}
    for row in data.split('\n')[:-1]:
        src, trg = [e.split('/') for e in row.split(' => ')]
        src = np.array([list(r) for r in src])
        trg = np.array([list(r) for r in trg])
        # Original matrix
        rules[src.tobytes()] = trg
        # Rotated matrices
        rules[np.rot90(src, k=1).tobytes()] = trg
        rules[np.rot90(src, k=2).tobytes()] = trg
        rules[np.rot90(src, k=3).tobytes()] = trg
        # Flipped (and rotated) matrices
        rules[np.flip(src, axis=1).tobytes()] = trg
        # Rotated matrices
        rules[np.rot90(np.flip(src, axis=1), k=1).tobytes()] = trg
        rules[np.rot90(np.flip(src, axis=1), k=2).tobytes()] = trg
        rules[np.rot90(np.flip(src, axis=1), k=3).tobytes()] = trg

    # Starting grid
    grid = np.array([['.', '#', '.'], ['.', '.', '#'], ['#', '#', '#']])
    for _ in range(0, 18):
        if len(grid) % 2 == 0:
            tgrid = False
            for row in range(0, len(grid), 2):
                rgrid = np.array([[]])
                for col in range(0, len(grid), 2):
                    subset = grid[row:row + 2, col:col + 2]
                    if col == 0:
                        rgrid = rules[subset.tobytes()]
                    else:
                        rgrid = np.concatenate((rgrid,
                                                rules[subset.tobytes()]),
                                               axis=1)
                if row == 0:
                    tgrid = rgrid
                else:
                    tgrid = np.concatenate((tgrid, rgrid),
                                           axis=0)
        else:
            tgrid = False
            for row in range(0, len(grid), 3):
                rgrid = np.array([[]])
                for col in range(0, len(grid), 3):
                    subset = grid[row:row + 3, col:col + 3]
                    if col == 0:
                        rgrid = rules[subset.tobytes()]
                    else:
                        rgrid = np.concatenate((rgrid,
                                                rules[subset.tobytes()]),
                                               axis=1)
                if row == 0:
                    tgrid = rgrid
                else:
                    tgrid = np.concatenate((tgrid, rgrid), axis=0)
        grid = tgrid
    return (grid == '#').sum()
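Example No. 11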
    def process_sample_specialized(self, features, label):

        if np.random.uniform() > self.prob:
            aug_f = np.flip(features, axis=1)
            aug_l = np.flip(label, axis=1)
            return aug_f, aug_l
        else:
            return features, label
Example No. 12
 def init_dynamic_map(self,path_to_map="/home/racecar/racecar_ws/src/obstacle/maps/basement_fixed.png"):
     self.dynamic_map = cv.imread(path_to_map, cv.IMREAD_GRAYSCALE)
     self.dynamic_map = cv.threshold(self.dynamic_map, 127 ,255, cv.THRESH_BINARY)[1]
     free_space = self.dynamic_map > 127
     occupied_space = self.dynamic_map < 127
     self.dynamic_map[free_space] = 0
     self.dynamic_map[occupied_space] = 100
     self.dynamic_map = np.flip(self.dynamic_map, 0)  # np.flip does not modify in place; assign the result back
Example No. 13
File: utils.py Project: tboch/mocpy
def build_plotting_moc(moc, wcs):
    # Get the WCS cdelt giving the deg.px^(-1) resolution.
    cdelt = wcs.wcs.cdelt
    # Convert in rad.px^(-1)
    cdelt = np.abs((2*np.pi/360)*cdelt[0])
    # Get the minimum depth such that the resolution of a cell is contained in 1px.
    depth_res = int(np.floor(np.log2(np.sqrt(np.pi/3)/cdelt)))
    depth_res = max(depth_res, 0)
    # Degrade the moc to that depth for plotting purposes. It is not necessary to plot pixels
    # that we will not see because they are contained in 1px.
    moc_plot = moc
    if moc.max_order > depth_res:
        moc_plot = moc.degrade_to_order(depth_res)

    moc_plot = moc_plot.refine_to_order(min_depth=2)

    # Get the MOC delimiting the FOV polygon
    width_px = int(wcs.wcs.crpix[0]*2.) # Assuming the wcs is centered on the axes
    heigth_px = int(wcs.wcs.crpix[1]*2.)

    # Compute the sky coordinate path delimiting the viewport.
    # It consists of a closed polygon of (4 - 1)*4 = 12 vertices
    x_px = np.linspace(0, width_px, 4)
    y_px = np.linspace(0, heigth_px, 4)

    X, Y = np.meshgrid(x_px, y_px)

    X_px = np.append(X[0, :-1], X[:-1, -1])
    X_px = np.append(X_px, np.flip(X[-1, 1:]))
    X_px = np.append(X_px, X[:-1, 0])

    Y_px = np.append(Y[0, :-1], Y[:-1, -1])
    Y_px = np.append(Y_px, Y[-1, :-1])
    Y_px = np.append(Y_px, np.flip(Y[1:, 0]))

    # Disable the output of warnings when encountering NaNs.
    warnings.filterwarnings("ignore")
    # Inverse projection from pixel coordinate space to the world coordinate space
    viewport = pixel_to_skycoord(X_px, Y_px, wcs)
    # If one coordinate is a NaN we exit the function and do not go further
    ra_deg, dec_deg = viewport.icrs.ra.deg, viewport.icrs.dec.deg
    warnings.filterwarnings("default")

    if np.isnan(ra_deg).any() or np.isnan(dec_deg).any():
        return moc_plot

    center_x_px, center_y_px = wcs.wcs.crpix[0], wcs.wcs.crpix[1]
    inside = pixel_to_skycoord(center_x_px, center_y_px, wcs)

    # Import MOC here to avoid circular imports
    from ..moc import MOC
    # Create a rough MOC (depth=3 is sufficient) from the viewport
    moc_viewport = MOC.from_polygon_skycoord(viewport, max_depth=3, inside=inside)

    # The moc to plot is the INPUT_MOC & MOC_VIEWPORT. For small FOVs this can reduce
    # a lot the time to draw the MOC along with its borders.
    moc_plot = moc_plot.intersection(moc_viewport)
    return moc_plot
Example No. 14
    def reflect(self, axis=0):
        """
        Reflect the lattice of control points along the direction defined
        by `axis`. In particular the origin point of the lattice is preserved.
        So, for instance, the reflection along x, is made with respect to the
        face of the lattice in the yz plane that is opposite to the origin.
        Same for the other directions. Only the weights (mu) along the chosen
        axis are reflected, while the others are preserved. The symmetry plane
        can not present deformations along the chosen axis.
        After the reflection there will be 2n-1 control points along `axis`,
        with a doubled box length.

        :param int axis: axis along which the reflection is performed.
            Default is 0. Possible values are 0, 1, or 2, corresponding
            to x, y, and z respectively.
        """
        # check axis value
        if axis not in (0, 1, 2):
            raise ValueError(
                "The axis has to be 0, 1, or 2. Current value {}.".format(axis))

        # check that the plane of symmetry is undeformed
        if (axis == 0 and np.count_nonzero(self.array_mu_x[-1, :, :]) != 0) or (
                axis == 1 and np.count_nonzero(self.array_mu_y[:, -1, :]) != 0
        ) or (axis == 2 and np.count_nonzero(self.array_mu_z[:, :, -1]) != 0):
            raise RuntimeError(
                "If you want to reflect the FFD bounding box along axis " + \
                "{} you can not diplace the control ".format(axis) + \
                "points in the symmetry plane along that axis."
                )

        # double the control points along the given axis, minus one (the shared symmetry plane)
        self.n_control_points[axis] = 2 * self.n_control_points[axis] - 1
        # double the box length
        self.box_length[axis] *= 2

        # we have to reflect the displacements only along the correct axis
        reflection = np.ones(3)
        reflection[axis] = -1

        # we select all the indices but the ones in the plane of symmetry
        indeces = [slice(None), slice(None), slice(None)]  # = [:, :, :]
        indeces[axis] = slice(1, None)  # = [1:]
        indeces = tuple(indeces)

        # we append along the given axis all the displacements reflected
        # and in the reverse order
        self.array_mu_x = np.append(
            self.array_mu_x,
            reflection[0] * np.flip(self.array_mu_x, axis)[indeces], axis=axis)
        self.array_mu_y = np.append(
            self.array_mu_y,
            reflection[1] * np.flip(self.array_mu_y, axis)[indeces], axis=axis)
        self.array_mu_z = np.append(
            self.array_mu_z,
            reflection[2] * np.flip(self.array_mu_z, axis)[indeces], axis=axis)
Example No. 15
    def apply_filter_backwards(self, traces):
        for tr in traces:
            tr.data = np.flip(tr.data)

        traces = self.apply_filter()

        for tr in traces:
            tr.data = np.flip(tr.data)

        return traces
Example No. 16
    def flip_data(data, mode):
        
        if mode in (1,2):
            return np.flipud(np.fliplr(data))

        elif mode == 3:
            return np.flip(np.flip(data,axis=1), axis=2)

        else:
            raise ValueError("Mode " + str(mode) + " not understood")
Example No. 17
def _bfill(arr, n=None, axis=-1):
    '''inverse of ffill'''
    import bottleneck as bn

    arr = np.flip(arr, axis=axis)

    # fill
    arr = bn.push(arr, axis=axis, n=n)

    # reverse back to original
    return np.flip(arr, axis=axis)
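For readers without bottleneck, a pure-NumPy sketch of the same backward fill for the 1-D case (no n limit):

import numpy as np

def bfill_1d(a):
    '''Backward-fill NaNs in a 1-D array (pure-NumPy sketch, no n limit).'''
    a = a[::-1]                                    # turn bfill into ffill
    idx = np.where(np.isnan(a), 0, np.arange(a.size))
    np.maximum.accumulate(idx, out=idx)            # carry the last valid index
    return a[idx][::-1]

assert np.allclose(bfill_1d(np.array([np.nan, 1.0, np.nan, 3.0])),
                   [1.0, 1.0, 3.0, 3.0])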
Example No. 18
def joinAR2(Extin, R_Extin, Distance, x5):
    step = np.abs(Distance[0] - Distance[1])
    ExtinAR1 = np.zeros(x5)
    ExtinAR2 = np.zeros(x5+1)
    ExtinAR1 = Extin[:x5]
    DistAR1 = Distance[:x5] - Distance[x5] - step
    ExtinAR2 = R_Extin[:x5+1]
    DistAR2 = abs(Distance[:x5+1] - Distance[x5])
    ExtinAR = np.append(ExtinAR1, np.flip(ExtinAR2))
    DistAR = np.append(DistAR1, np.flip(DistAR2))
    return ExtinAR, DistAR
Example No. 19
	def get_subset(self):
		σ = np.median(sklearn.metrics.pairwise.pairwise_distances(self.X))
		N = self.X.shape[0]
		K_orig = self.rbk_sklearn(self.X, σ)
		[D,V] = np.linalg.eigh(K_orig)

		scaled_cumsum_D = np.cumsum(np.flip(D,0)/np.sum(D))
		eigLen = len(scaled_cumsum_D[scaled_cumsum_D < 0.95])
		largest_eigs = np.flip(D,0)[0:eigLen]
		largest_eigs = largest_eigs/np.sum(largest_eigs)

		for test_percent in np.arange(0.05,0.9,0.05):
			kd_list = []
			lowest_Kd = 100
			best_test_sample_id = None
			for rep in range(10):
				inc = int(np.floor(test_percent*N))
				if inc < eigLen: continue
		
				rp = np.random.permutation(N).tolist()
				test_set_id = rp[0:inc]
				sample_X = self.X[test_set_id,:]
		
				K_new = self.rbk_sklearn(sample_X, σ)

				[D,V] = np.linalg.eigh(K_new)
				small_eigs = np.flip(D,0)[0:eigLen]
				small_eigs = small_eigs/np.sum(small_eigs)
		
				Kd = np.max(np.absolute(largest_eigs - small_eigs))
				kd_list.append(Kd)
	
				if Kd < lowest_Kd:
					lowest_Kd = Kd
					#print(lowest_Kd)
					best_test_sample_id = test_set_id
					test_set_indx = list(set(rp) - set(best_test_sample_id))
		
			avg_kd = np.mean(kd_list)
			print('At %.3f percent, avg error : %.3f'%(test_percent, avg_kd))
			if avg_kd < self.threashold: break
	
		self.best_test_sample_id = best_test_sample_id
		self.new_X = self.X[best_test_sample_id,:]
		K_new = self.rbk_sklearn(self.new_X, σ)

		[D,V] = np.linalg.eigh(K_new)
		small_eigs = np.flip(D,0)[0:eigLen]
		small_eigs = small_eigs/np.sum(small_eigs)
		Kd = np.max(np.absolute(largest_eigs - small_eigs))
		print('\n%.3f percent was chosen with kernel divergence error of %.3f'%(test_percent, Kd))

		return [self.new_X, best_test_sample_id]
Example No. 20
 def get_vision_image(self):
     resolution, image = check_ret(self.env.simxGetVisionSensorImage(
         self.handle,
         0,  # options=0 -> RGB
         blocking,
     ))
     dim, im = resolution, image
     nim = np.array(im, dtype='uint8')
     nim = np.reshape(nim, (dim[1], dim[0], 3))
     nim = np.flip(nim, 0)  # vertical (up-down) flip
     nim = np.flip(nim, 2)  # RGB -> BGR
     return nim
Example No. 21
def count_mask(mask):
    """Count statistics and bounding box for given image mask"""
    count = int(mask.sum())
    if count == 0:
        return count, None, None, None, None

    # a column/row contains part of the mask iff any() along it is True
    x_min = mask.any(axis=0).argmax()
    x_max = mask.shape[1] - np.flip(mask.any(axis=0), axis=0).argmax() - 1
    w = (mask.shape[1] - np.flip(mask.any(axis=0), axis=0).argmax()
            - mask.any(axis=0).argmax())
    h = (mask.shape[0] - np.flip(mask.any(axis=1), axis=0).argmax()
            - mask.any(axis=1).argmax())
    return count, w, h, x_min, x_max
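The same statistics can be read more directly with np.flatnonzero over np.any; a sketch of an equivalent formulation:

import numpy as np

def count_mask_simple(mask):
    '''Bounding-box stats via np.any / np.flatnonzero (equivalent sketch).'''
    count = int(mask.sum())
    if count == 0:
        return count, None, None, None, None
    cols = np.flatnonzero(mask.any(axis=0))
    rows = np.flatnonzero(mask.any(axis=1))
    return (count, int(cols[-1] - cols[0] + 1), int(rows[-1] - rows[0] + 1),
            int(cols[0]), int(cols[-1]))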
Example No. 22
def go(rules_raw, iters=5):
    rules = {}
    for rule in rules_raw:
        a, b = rule.split("=>")
        frompat = np.array([
            list(map(int, list(x)))
            for x in a.replace("#", "1").replace('.', "0").strip().split(r'/')
        ]).tobytes()
        topat = np.array([
            list(map(int, list(x)))
            for x in b.replace("#", "1").replace('.', "0").strip().split(r'/')
        ])
        rules[frompat] = topat

    grid = np.array([[0, 1, 0], [0, 0, 1], [1, 1, 1]])
    for iteration in range(iters):
        # print(grid, np.count_nonzero(grid), grid.shape)
        # print("-------------")
        if grid.shape[0] % 2 == 0:
            newdim = (grid.shape[0] // 2) * 3
        else:
            newdim = (grid.shape[0] // 3) * 4
        newgrid = np.zeros((newdim, newdim), dtype=int)
        split = 2 if grid.size % 2 == 0 else 3
        newsize = 4 if split == 3 else 3
        indices = product(range(grid.shape[0] // split), repeat=2)
        for i, j in indices:
            subgrid = grid[i * split:(i + 1) * split, j * split:(
                j + 1) * split]

            symmetries = [
                subgrid,
                np.flip(subgrid, 1),
                np.rot90(subgrid),
                np.flip(np.rot90(subgrid), 0),
                np.rot90(subgrid, 2),
                np.flip(np.rot90(subgrid, 2), 0),
                np.rot90(subgrid, 3),
                np.flip(np.rot90(subgrid, 3), 0)
            ]
            for symmetry in symmetries:
                if symmetry.tobytes() in rules:
                    replacement = rules[symmetry.tobytes()]
                    # print(f"found rule {symmetry} => {replacement}")
                    newgrid[i * newsize:(i + 1) * newsize, j * newsize:(
                        j + 1) * newsize] = replacement
                    break
        grid = newgrid
    print(grid, np.count_nonzero(grid), grid.shape)
Example No. 23
def test_convert_weights():
    def get_model(shape, data_format):
        model = Sequential()
        model.add(Conv2D(filters=2,
                         kernel_size=(4, 3),
                         input_shape=shape,
                         data_format=data_format))
        model.add(Flatten())
        model.add(Dense(5))
        return model

    for data_format in ['channels_first', 'channels_last']:
        if data_format == 'channels_first':
            shape = (3, 5, 5)
            target_shape = (5, 5, 3)
            prev_shape = (2, 3, 2)
            flip = lambda x: np.flip(np.flip(x, axis=2), axis=3)
            transpose = lambda x: np.transpose(x, (0, 2, 3, 1))
            target_data_format = 'channels_last'
        elif data_format == 'channels_last':
            shape = (5, 5, 3)
            target_shape = (3, 5, 5)
            prev_shape = (2, 2, 3)
            flip = lambda x: np.flip(np.flip(x, axis=1), axis=2)
            transpose = lambda x: np.transpose(x, (0, 3, 1, 2))
            target_data_format = 'channels_first'

        model1 = get_model(shape, data_format)
        model2 = get_model(target_shape, target_data_format)
        conv = K.function([model1.input], [model1.layers[0].output])

        x = np.random.random((1,) + shape)

        # Test equivalence of convert_all_kernels_in_model
        convout1 = conv([x])[0]
        layer_utils.convert_all_kernels_in_model(model1)
        convout2 = flip(conv([flip(x)])[0])

        assert_allclose(convout1, convout2, atol=1e-5)

        # Test equivalence of convert_dense_weights_data_format
        out1 = model1.predict(x)
        layer_utils.convert_dense_weights_data_format(
            model1.layers[2], prev_shape, target_data_format)
        for (src, dst) in zip(model1.layers, model2.layers):
            dst.set_weights(src.get_weights())
        out2 = model2.predict(transpose(x))

        assert_allclose(out1, out2, atol=1e-5)
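Example No. 24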
def sample_trajectory(M, n_states):
    # Samples trajectories from random nodes
    #  in our domain (M)
    G, W = M.get_graph_inv()
    N = G.shape[0]
    if N >= n_states:
        rand_ind = np.random.permutation(N)
    else:
        rand_ind = np.tile(np.random.permutation(N), (1, 10))
    init_states = rand_ind[0:n_states].flatten()
    goal_s = M.map_ind_to_state(M.targetx, M.targety)
    states = []
    states_xy = []
    states_one_hot = []
    # Get optimal path from graph
    g_dense = W
    g_masked = np.ma.masked_values(g_dense, 0)
    g_sparse = csr_matrix(g_dense)
    d, pred = dijkstra(g_sparse, indices=goal_s, return_predecessors=True)
    for i in range(n_states):
        path = trace_path(pred, goal_s, init_states[i])
        path = np.flip(path, 0)
        states.append(path)
    for state in states:
        L = len(state)
        r, c = M.get_coords(state)
        row_m = np.zeros((L, M.n_row))
        col_m = np.zeros((L, M.n_col))
        for i in range(L):
            row_m[i, r[i]] = 1
            col_m[i, c[i]] = 1
        states_one_hot.append(np.hstack((row_m, col_m)))
        states_xy.append(np.hstack((r, c)))
    return states_xy, states_one_hot
Example No. 25
def dynamic_programming(maze, src, dst, wall_threshold=0.5):
    h, w = maze.shape
    visited = np.zeros_like(maze, dtype=bool)
    trace = np.zeros([h, w, 2], dtype=np.uint32)
    schedule = queue.Queue()
    #
    schedule.put(src)
    while True:
        x, y = schedule.get()
        if visited[y, x]:  # `schedule` may include repeated items
            continue
        visited[y, x] = True
        if (x, y) == dst:
            break
        else:
            for xx, yy in [(x, y - 1), (x, y + 1), (x - 1, y), (x + 1, y)]:
                if 0 <= xx < w and 0 <= yy < h:
                    if visited[yy, xx]:
                        continue
                    if maze[yy, xx] > wall_threshold:
                        continue
                    schedule.put((xx, yy))
                    trace[yy, xx] = x, y
    path = []
    xx, yy = dst
    while True:
        path.append((xx, yy))
        x, y = trace[yy, xx]
        if src == (x, y):
            break
        xx, yy = x, y
    path = np.flip(path, axis=0)
    return path
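Despite its name, the function above is a breadth-first search; note that the returned path excludes the source cell. A toy run (hypothetical maze):

import numpy as np

# 0 = free, 1 = wall; find a path from (0, 0) to (2, 2).
maze = np.array([[0., 1., 0.],
                 [0., 1., 0.],
                 [0., 0., 0.]])
path = dynamic_programming(maze, src=(0, 0), dst=(2, 2))
# Expected (x, y) steps: down the left column, then along the bottom row.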
Example No. 26
    def wrapper(*args):
        x = args[0]
        w = args[1]
        if x.ndim == 3:
            w = np.flipud(w)
            w = np.transpose(w, (1, 2, 0))
            if args[3] == 'channels_last':
                x = np.transpose(x, (0, 2, 1))
        elif x.ndim == 4:
            w = np.fliplr(np.flipud(w))
            w = np.transpose(w, (2, 3, 0, 1))
            if args[3] == 'channels_last':
                x = np.transpose(x, (0, 3, 1, 2))
        else:
            w = np.flip(np.fliplr(np.flipud(w)), axis=2)
            w = np.transpose(w, (3, 4, 0, 1, 2))
            if args[3] == 'channels_last':
                x = np.transpose(x, (0, 4, 1, 2, 3))

        y = func(x, w, args[2], args[3])

        if args[3] == 'channels_last':
            if y.ndim == 3:
                y = np.transpose(y, (0, 2, 1))
            elif y.ndim == 4:
                y = np.transpose(y, (0, 2, 3, 1))
            else:
                y = np.transpose(y, (0, 2, 3, 4, 1))

        return y
Example No. 27
    def _argsortData(self, data, order):
        if data.ndim == 1:
            indices = np.argsort(data, kind='mergesort')
            if order == Qt.DescendingOrder:
                indices = indices[::-1]
            # Always sort NaNs last
            if np.issubdtype(data.dtype, np.number):
                indices = np.roll(indices, -np.isnan(data).sum())
        else:
            assert np.issubdtype(data.dtype, np.number), \
                'We do not deal with non numeric values in sorting by ' \
                'multiple values'
            if order == Qt.DescendingOrder:
                data[:, -1] = -data[:, -1]

            # In order to make sure NaNs always appear at the end, insert an
            # indicator of whether each value is NaN. Note that the data array
            # must contain an empty column of zeros at index -2, since inserting
            # an extra column after the fact can result in a MemoryError for
            # data with a large number of variables
            assert np.all(data[:, -2] == 0), \
                'Add an empty column of zeros at index -2 to accommodate NaNs'
            np.isnan(data[:, -1], out=data[:, -2])

            indices = np.lexsort(np.flip(data.T, axis=0))

        return indices
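Example No. 28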
def test_pick():
    group = Cyclic(2)
    complex = MultiComplex.generate(group, 6)

    from pycomplex.math import linalg
    N = 1024
    points = np.moveaxis(np.indices((N, N)).astype(float), 0, -1) / (N - 1) * 2 - 1
    z = np.sqrt(np.clip(1 - linalg.dot(points, points), 0, 1))
    points = np.concatenate([points, z[..., None]], axis=-1)

    element_idx, sub_idx, quotient_idx, triangle_idx, bary = complex[-1].pick(points.reshape(-1, 3))

    print(bary.min(), bary.max())

    if True:
        col = bary
    else:
        col = np.array([
            sub_idx.astype(float) / sub_idx.max(),
            sub_idx * 0,
            quotient_idx.astype(float) / quotient_idx.max()
        ]).T

    plt.figure()
    img = np.flip(np.moveaxis(col.reshape(N, N, 3), 0, 1), axis=0)
    plt.imshow(img)

    plt.show()
Example No. 29
    def get_image(self,img_name_list,idx,flip):
        img_name = os.path.join(self.training_image_path, img_name_list.iloc[idx])
        image = io.imread(img_name)

        # if grayscale convert to 3-channel image 
        if image.ndim==2:
            image=np.repeat(np.expand_dims(image,2),axis=2,repeats=3)
            
        # do random crop
        if self.random_crop:
            h,w,c=image.shape
            top=np.random.randint(h/4)
            bottom=int(3*h/4+np.random.randint(h/4))
            left=np.random.randint(w/4)
            right=int(3*w/4+np.random.randint(w/4))
            image = image[top:bottom,left:right,:]
            
        # flip horizontally if needed
        if flip:
            image=np.flip(image,1)
            
        # get image size
        im_size = np.asarray(image.shape)
        
        # convert to torch Variable
        image = np.expand_dims(image.transpose((2,0,1)),0)
        image = torch.Tensor(image.astype(np.float32))
        image_var = Variable(image,requires_grad=False)
        
        # Resize image using bilinear sampling with identity affine tnf
        image = self.affineTnf(image_var).data.squeeze(0)
        
        im_size = torch.Tensor(im_size.astype(np.float32))
        
        return (image, im_size)
Example No. 30
    def test_kullback_leibler(self):
        n = 8
        mle = maxlike.Finite()

        # fetch and prepare data
        df1 = pd.read_csv(data_folder + "data_proba.csv", index_col=[0, 1])
        kwargs, _ = prepare_series(
            df1.stack(), {'N': np.sum})

        N = kwargs['N']

        # guess params
        h = (skellam_cdf_root(*(N.sum((0, 1)) / N.sum())[[0, 2]]) *
             np.array([-1, 1])).sum()

        S = N.sum(0) + np.flip(N.sum(1), 1)
        S /= S.sum(1)[:, None]

        s = pd.DataFrame(S[:, [0, 2]]).apply(lambda row:
            pd.Series(skellam_cdf_root(*row), index=['a', 'b']), 1)
        s = np.log(s).sub(np.log(s.mean()), 1)

        mle.add_param(s['a'].values)
        mle.add_param(-s['b'].values)
        mle.add_param(h)
        mle.add_constraint([0, 1], Linear([1, 1]))

        # define functions
        f1 = Sum(2)
        f1.add(X(), 0, 0)
        f1.add(-X(), 1, 1)
        f1.add(0.5 * Scalar(), 2, [])

        f2 = Sum(2)
        f2.add(X(), 0, 0)
        f2.add(-X(), 1, 1)
        f2.add(-0.5 * Scalar(), 2, [])

        F1 = Poisson(n) @ Exp() @ f1
        F2 = Poisson(n) @ Exp() @ f2

        F = Product(2, 2)
        F.add(F1, [0, 1, 2], [0, 1], 1)
        F.add(F2, [0, 1, 2], [1, 0], 0)

        mle.model = CollapseMatrix() @ F

        tol = 1e-8
        mle.fit(**kwargs, verbose=self.verbose)
        a, b, h = mle.params
        s_a, s_b, s_h = mle.std_error()

        df = pd.read_csv(data_folder + "test_kullback_leibler.csv")

        self.assertAlmostEqual(h,   0.27655587703454143, delta=tol)
        self.assertAlmostEqual(s_h, 0.0680302933547584, delta=tol)
        np.testing.assert_allclose(a, df['a'], atol=tol)
        np.testing.assert_allclose(b, df['b'], atol=tol)
        np.testing.assert_allclose(s_a, df['s_a'], atol=tol)
        np.testing.assert_allclose(s_b, df['s_b'], atol=tol)
Example No. 31
    def plot_gamma_nu(self):
        if not (self.is_attribute("gamma") & self.is_attribute("nu")
                & self.is_attribute("intensity_plus_net")
                & self.is_attribute("intensity_minus_net")):
            return

        gamma = self.gamma
        nu = self.nu
        signal_plus = self.intensity_plus_net
        signal_minus = self.intensity_minus_net
        bkg_calc = self.intensity_bkg_calc
        signal_sum = signal_plus + signal_minus + bkg_calc
        signal_difference = signal_plus - signal_minus

        flag_polarized, flag_unpolarized = False, False
        if self.is_attribute("intensity_plus"):
            flag_polarized = True
        if self.is_attribute("intensity"):
            flag_unpolarized = True

        if flag_polarized:
            signal_exp_plus = self.intensity_plus
            signal_exp_minus = self.intensity_minus
            signal_exp_plus_sigma = self.intensity_plus_sigma
            signal_exp_minus_sigma = self.intensity_minus_sigma
            signal_exp_sum = signal_exp_plus + signal_exp_minus
            signal_exp_difference = signal_exp_plus - signal_exp_minus
            signal_exp_sum_sigma_sq = numpy.square(
                signal_exp_plus_sigma) + numpy.square(signal_exp_minus_sigma)
            signal_exp_sum_sigma = numpy.sqrt(signal_exp_sum_sigma_sq)
        elif flag_unpolarized:
            signal_exp_sum = self.intensity
            signal_exp_sum_sigma = self.intensity_sigma
            signal_exp_sum_sigma_sq = numpy.square(signal_exp_sum_sigma)

        excluded_points = numpy.logical_or(self.excluded_points,
                                           numpy.isnan(signal_exp_sum))
        included_points = numpy.logical_not(excluded_points)

        signal_em_sum = numpy.concatenate(
            [numpy.flip(signal_sum, axis=1), signal_exp_sum], axis=1)
        if flag_polarized:
            signal_em_difference = numpy.concatenate(
                [numpy.flip(signal_difference, axis=1), signal_exp_difference],
                axis=1)

        max_val = max([
            numpy.nanmax(signal_exp_sum[included_points]),
            numpy.nanmax(signal_sum[included_points])
        ])
        min_val = numpy.nanmin(signal_em_sum)
        max_val = max_val - (max_val - min_val) * 0.25
        n_sigma_threshold = 20.

        alpha_excl_points = numpy.zeros(signal_exp_sum.shape, dtype=int)
        alpha_excl_points[excluded_points] = 200
        alpha_em_sum = numpy.concatenate(
            [numpy.zeros_like(alpha_excl_points), alpha_excl_points],
            axis=1).transpose()

        zz_sum = numpy.stack([
            numpy.concatenate(
                [numpy.zeros_like(alpha_excl_points), alpha_excl_points],
                axis=1).transpose(),
            numpy.concatenate(
                [numpy.zeros_like(alpha_excl_points), alpha_excl_points],
                axis=1).transpose(),
            numpy.concatenate(
                [numpy.zeros_like(alpha_excl_points), alpha_excl_points],
                axis=1).transpose(), alpha_em_sum
        ],
                             axis=2)

        extent = [gamma.min(), gamma.max(), nu.min(), nu.max()]
        cmap_sum = plt.get_cmap("turbo")  # BuPu

        if flag_polarized:
            fig, axs = plt.subplots(2, 2)
            ax1, ax2, ax3, ax4 = axs[0, 0], axs[0, 1], axs[1, 0], axs[1, 1]
        elif flag_unpolarized:
            fig, axs = plt.subplots(2, 1, sharex=True)
            ax1, ax3 = axs[0], axs[1]

        norm_1 = matplotlib.colors.Normalize(vmax=max_val, vmin=min_val)
        ax1.imshow(signal_em_sum.transpose(),
                   origin="lower",
                   aspect="auto",
                   cmap=cmap_sum,
                   norm=norm_1,
                   extent=extent)
        ax1.imshow(zz_sum,
                   origin="lower",
                   aspect="auto",
                   alpha=0.8,
                   extent=extent)

        ax1.set_xticks([])
        ax1.set_xlabel(r"$\gamma$ (deg.)")
        ax1.set_yticks([])
        ax1.set_ylabel("  Model       Experiment")

        if flag_polarized:
            cmap_difference = plt.get_cmap("turbo")  # BrBG
            max_val = numpy.nanmax(numpy.abs(signal_em_difference)) * 0.50
            norm_2 = matplotlib.colors.Normalize(vmax=max_val, vmin=-max_val)
            ax2.imshow(signal_em_difference.transpose(),
                       origin="lower",
                       aspect="auto",
                       norm=norm_2,
                       cmap=cmap_difference,
                       extent=extent)

            ax2.set_xticks([])
            ax2.set_xlabel(r"$\gamma$ (deg.)")
            ax2.set_yticks([])


        chi_sq_per_n_sum = numpy.nansum(
            numpy.square((signal_exp_sum - signal_sum) * included_points /
                         signal_exp_sum_sigma)) / numpy.sum(included_points)

        ax1.set_title(r"Unpolarized signal $\chi^2/n=$" +
                      f"{chi_sq_per_n_sum:.2f}")

        ttheta, signal_projection_sum, signal_projection_exp_sum, signal_projection_exp_sum_sigma, \
            signal_projection_difference, signal_projection_exp_difference, signal_projection_exp_difference_sigma = \
            self.calc_projections_sum_difference()

        ax3.errorbar(ttheta[:gamma.size],
                     signal_projection_exp_sum[:gamma.size],
                     yerr=signal_projection_exp_sum_sigma[:gamma.size],
                     fmt="ko",
                     alpha=0.2,
                     label="experiment")
        ax3.plot(ttheta[:gamma.size],
                 signal_projection_sum[:gamma.size],
                 "b-",
                 label="model",
                 linewidth=2)

        y_min_d, y_max_d = ax3.get_ylim()
        param = y_min_d - numpy.nanmax(
            (signal_projection_exp_sum - signal_projection_sum)[:gamma.size])
        ax3.plot([ttheta[:gamma.size].min(), ttheta[:gamma.size].max()],
                 [param, param], "k:")
        ax3.plot(
            ttheta[:gamma.size],
            (signal_projection_exp_sum - signal_projection_sum)[:gamma.size] +
            param,
            "r-",
            alpha=0.5)  # , label="difference"

        # ax3.plot(ttheta[:gamma.size], signal_projection_exp_sum[:gamma.size], "b.", label="experiment")
        # ax3.plot(ttheta[:gamma.size], (signal_projection_exp_sum-signal_projection_sum)[:gamma.size], "r-")
        ax3.legend()
        ax3.set_xlabel(r"$2\theta$ (deg.)")
        # ax3.get_yaxis().set_visible(False)

        if flag_polarized:
            chi_sq_per_n_difference = numpy.nansum(
                numpy.square((signal_exp_difference - signal_difference) /
                             signal_exp_sum_sigma)) / numpy.prod(
                                 signal_exp_difference.shape)
            ax2.set_title(r"Polarized signal $\chi^2/n=$" +
                          f"{chi_sq_per_n_difference:.2f}")
            ax4.errorbar(
                ttheta[:gamma.size],
                signal_projection_exp_difference[:gamma.size],
                yerr=signal_projection_exp_difference_sigma[:gamma.size],
                fmt="ko",
                alpha=0.2,
                label="experiment")
            ax4.plot(ttheta[:gamma.size],
                     signal_projection_difference[:gamma.size],
                     "b-",
                     label="model",
                     linewidth=2)

            y_min_d, y_max_d = ax4.get_ylim()
            param = y_min_d - numpy.nanmax(
                (signal_projection_exp_difference -
                 signal_projection_difference)[:gamma.size])
            ax4.plot([ttheta[:gamma.size].min(), ttheta[:gamma.size].max()],
                     [param, param], "k:")
            ax4.plot(ttheta[:gamma.size],
                     (signal_projection_exp_difference -
                      signal_projection_difference)[:gamma.size] + param,
                     "r-",
                     alpha=0.5)  #, label="difference"
            ax4.legend()
            ax4.set_xlabel(r"$2\theta$ (deg.)")
            # ax4.get_yaxis().set_visible(False)

        return (fig, ax1)
Example No. 32
def rot180(ndarray):
    rot = np.flip(np.flip(ndarray, 1), 0)
    return rot
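The double flip is a 180° rotation; np.rot90 with k=2 expresses the same thing in one call:

import numpy as np

a = np.arange(6).reshape(2, 3)
assert np.array_equal(rot180(a), np.rot90(a, 2))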
Example No. 33
 def read(self):
     frames = self.pipeline.wait_for_frames()
     color_frame = frames.get_color_frame()
     color_image = np.asanyarray(color_frame.get_data())
     return np.flip(color_image, -1).copy()
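Example No. 34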
def plot_stacked_bar(data,
                     series_labels,
                     category_labels=None,
                     show_values=False,
                     value_format="{}",
                     y_label=None,
                     colors=None,
                     grid=False,
                     reverse=False):
    """Plots a stacked bar chart with the data and labels provided.

    Keyword arguments:
    data            -- 2-dimensional numpy array or nested list
                       containing data for each series in rows
    series_labels   -- list of series labels (these appear in
                       the legend)
    category_labels -- list of category labels (these appear
                       on the x-axis)
    show_values     -- If True then numeric value labels will
                       be shown on each bar
    value_format    -- Format string for numeric value labels
                       (default is "{}")
    y_label         -- Label for y-axis (str)
    colors          -- List of color labels
    grid            -- If True display grid
    reverse         -- If True reverse the order that the
                       series are displayed (left-to-right
                       or right-to-left)
    """

    ny = len(data[0])
    ind = list(range(ny))

    axes = []
    cum_size = np.zeros(ny)

    data = np.array(data)

    if reverse:
        data = np.flip(data, axis=1)
        category_labels = reversed(category_labels)

    for i, row_data in enumerate(data):
        color = colors[i] if colors is not None else None
        axes.append(
            plt.bar(ind,
                    row_data,
                    bottom=cum_size,
                    label=series_labels[i],
                    color=color))
        cum_size += row_data

    if category_labels:
        plt.xticks(ind, category_labels)

    if y_label:
        plt.ylabel(y_label)

    plt.legend()

    if grid:
        plt.grid()

    if show_values:
        for axis in axes:
            for bar in axis:
                w, h = bar.get_width(), bar.get_height()
                plt.text(bar.get_x() + w / 2,
                         bar.get_y() + h / 2,
                         value_format.format(h),
                         ha="center",
                         va="center")
Example No. 35
def print_board(board):
	print(np.flip(board, 0))
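Typical for Connect-Four style grids where row 0 is the bottom of the board; flipping axis 0 prints the bottom row last:

import numpy as np

board = np.zeros((3, 4), dtype=int)
board[0, 1] = 1      # a piece "dropped" into the bottom row
print_board(board)   # the piece appears on the last printed row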
Example No. 36
def random_horizontal_flip(imgs):
    if random.random() < 0.5:
        for i in range(len(imgs)):
            imgs[i] = np.flip(imgs[i], axis=1).copy()
    return imgs
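Usage sketch; the .copy() matters because np.flip returns a negatively-strided view, which consumers such as torch.from_numpy reject:

import numpy as np
import random

imgs = [np.arange(12).reshape(3, 4) for _ in range(2)]
flipped = random_horizontal_flip(imgs)  # one coin flip applied to every image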
Example No. 37
    def forward(self, x, dil, gconv, p4m):
        if gconv:
            ww = self.weight
            sh = list(ww.size())
            w = np.arange(np.prod(sh))
            w = w.reshape(sh)
            w_list = []

            bb = self.bias
            shb = list(bb.size())
            b = np.arange(np.prod(shb))
            b = b.reshape(shb)
            b_list = []

            for rot in range(4):
                for flip in range(2):
                    w_mod = w
                    w_mod = np.rot90(w_mod, rot, axes=(2, 3))

                    if flip == 1:
                        w_mod = np.flip(w_mod, axis=2)

                    w_list.append(w_mod)

                    b_list.append(b)

            w = np.concatenate(w_list, axis=0)
            sh_w = np.shape(w)
            w = w.reshape(-1)

            b = np.concatenate(b_list, axis=0)
            sh_b2 = np.shape(b)
            b = b.reshape(-1)

            bb = bb.view(-1)
            bb = bb[b]
            bb = bb.view(sh_b2)
            bb = bb.contiguous()

            ww = ww.view(-1)
            ww = ww[w]
            ww = ww.view(sh_w)
            ww = ww.contiguous()

            if p4m:
                s = x.size()
                x = x.view(s[0], s[1] * s[2], s[3], s[4])
                x = F.conv2d(x, ww, bb, self.stride, self.padding, dil,
                             self.groups)
                x = x.view(s[0], int(x.size()[1] / 8), 8,
                           x.size()[2],
                           x.size()[3])
            else:
                s = x.size()
                x = F.conv2d(x, ww, bb, self.stride, self.padding, dil,
                             self.groups)
                x = x.view(s[0], int(x.size()[1] / 8), 8,
                           x.size()[2],
                           x.size()[3])

        else:
            # ww and b are only defined in the gconv branch; use the raw parameters here
            x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding,
                         dil, self.groups)

        return x
Example No. 38
def test_fft_kmq(nalpha):
    # Create regular grid.
    nmax = 2
    mesh = [2*nmax+1]*1
    grid, eigs = generate_fft_grid(mesh)
    
    qmax = 2*nmax
    qmesh = [2*qmax+1]*1
    qgrid, qeigs = generate_fft_grid(qmesh)

    # Create wavefunction
    nbasis = len(grid)
    
    numpy.random.seed(7)

    psi = get_random_wavefunction((nalpha,nalpha), nbasis)
    I = numpy.eye(nbasis, dtype=numpy.complex128)
    # print(I.shape)
    I = get_random_wavefunction((nalpha,nalpha), nbasis)
    # print(I.shape)
    # exit()

    # Select lowest energy states for trial
    trial = I[:,:nalpha].conj()
    trial.imag[:] = 0
    
    G, Gh = gab_mod(trial,psi[:,:nalpha])
    nqgrid = numpy.prod(qmesh)

    # # Check by direct convolution f(q) = \sum_G Psi[G+Q] Gh[G].
    print(grid.shape)
    fq_direct = numpy.zeros(nqgrid, dtype=numpy.complex128)
    for iq, q in enumerate(qgrid):
        Gtrace_direct = 0
        # for a in range(nalpha):
        # compute \sum_G f_G(q)
        for i, k in enumerate(grid):
            # kmq = q-k
            kmq = k-q
            ikmq = lookup(kmq, grid)
            # idx = numpy.argwhere(basis == q-k)
            # print(idx, ikmq)
            if ikmq is not None:
                Gtrace_direct += trial[i,0] * Gh[0,ikmq]
        fq_direct[iq] = Gtrace_direct
    print("trial[igmq,0] = {}".format(trial[:,0]))
    print("Gh[0,i] = {}".format(Gh[0,:]))
    # Check by fft convolve
    # Compare to fq
    fq_conv = numpy.zeros(nqgrid, dtype=numpy.complex128)
    trial_pq = trial[:,0].copy()
    Gh_pq = Gh[0,:].copy()

    # \sum_G f(G-Q) g(G)
    # -G-Q -> G'
    # \sum_G g(-G-Q) f(-G) = \sum_G g(G) f(G+Q)
    # trial_pq = numpy.conj(trial_pq)
    fq_conv += nqgrid*convolve(Gh_pq, numpy.flip(trial_pq), mesh)
    fq_conv = numpy.flip(fq_conv)

    import scipy.signal
    fq_conv_sc = numpy.flip(scipy.signal.fftconvolve(
            Gh_pq,numpy.flip(trial_pq)).ravel())

    # for i in range(qmesh[0]):
    #     target = fq_conv[i]
    #     print(numpy.abs(target))
    #     if (numpy.abs(target) > 1e-8):
    #         # print(target)
    #         idx = numpy.argwhere(numpy.abs(fq_direct - target) < 1e-8)
    #         # print(i, idx)


    import matplotlib.pyplot as pl
    pl.plot(fq_conv, label='fft')
    pl.plot(fq_conv_sc, label='fft_scipy')
    pl.plot(fq_direct, label='direct')
    # pl.plot(fq, label='from_gf')
    pl.legend()
    pl.show()
Example No. 39
Z1 = np.where(np.logical_and(verticalLoad >= 0, verticalLoad <= 320))
Z2 = np.where(np.logical_and(verticalLoad >= 320, verticalLoad <= 550))
Z3 = np.where(np.logical_and(verticalLoad >= 550, verticalLoad <= 750))
Z4 = np.where(np.logical_and(verticalLoad >= 750, verticalLoad <= 950))
Z5 = np.where(np.logical_and(verticalLoad >= 980, verticalLoad <= 1200))

labelAvgZ1 = str(np.round(np.average(verticalLoad[Z1]))) + (' N')
labelAvgZ2 = str(np.round(np.average(verticalLoad[Z2]))) + (' N')
labelAvgZ3 = str(np.round(np.average(verticalLoad[Z3]))) + (' N')
labelAvgZ4 = str(np.round(np.average(verticalLoad[Z4]))) + (' N')
labelAvgZ5 = str(np.round(np.average(verticalLoad[Z5]))) + (' N')

d = 10

x1 = np.flip(np.sort(slipAngle[Z1]))
y1 = np.sort(lateralForce[Z1])
curve1 = np.polyfit(x1, y1, d)
poly1 = np.poly1d(curve1)

x2 = np.flip(np.sort(slipAngle[Z2]))
y2 = np.sort(lateralForce[Z2])
curve2 = np.polyfit(x2, y2, d)
poly2 = np.poly1d(curve2)

x3 = np.flip(np.sort(slipAngle[Z3]))
y3 = np.sort(lateralForce[Z3])
curve3 = np.polyfit(x3, y3, d)
poly3 = np.poly1d(curve3)

x4 = np.flip(np.sort(slipAngle[Z4]))
Example No. 40
class TestRectGridInterpolation:
    ul = (2, 55)
    lr = (12, 45)

    x = np.linspace(ul[0], lr[0], 101)
    y = np.linspace(ul[1], lr[1], 51)

    grids = {}
    valgrids = {}
    X, Y = np.meshgrid(x, y)
    grids["image_upper"] = np.stack((X, Y), axis=-1)
    valgrids["image_upper"] = X + Y

    y = np.flip(y)

    X, Y = np.meshgrid(x, y)
    grids["image_lower"] = np.stack((X, Y), axis=-1)
    valgrids["image_lower"] = X + Y

    Y, X = np.meshgrid(y, x)
    grids["plot"] = np.stack((X, Y), axis=-1)
    valgrids["plot"] = X + Y

    xt = np.random.uniform(ul[0], lr[0], 10000)
    yt = np.random.uniform(lr[1], ul[1], 10000)
    points = np.stack((xt, yt), axis=-1)
    valpoints = xt + yt

    grid = grids["image_upper"]
    valgrid = valgrids["image_upper"]
    grid2 = (grid - ul) / 2 + ul
    valgrid2 = valgrid / 2

    def test_rect_grid(self, get_rect_method):
        for indexing, src in self.grids.items():
            ip = ipol.RectGrid(src, self.points, method=get_rect_method)
            assert ("image" in indexing) == ip.image
            assert ("upper" in indexing) == ip.upper
            valip = ip(self.valgrids[indexing])
            bad = np.isnan(valip)
            pbad = np.sum(bad) / bad.size
            assert pbad == 0

        for indexing, trg in self.grids.items():
            ip = ipol.RectGrid(self.grid2, trg, method=get_rect_method)
            valip = ip(self.valgrid2)
            assert valip.shape == trg.shape[:-1]
            bad = np.isnan(valip)
            pbad = np.sum(bad) / bad.size
            assert abs(pbad - 0.75) < 0.1

            ip = ipol.RectGrid(self.grid, self.grid2, method=get_rect_method)
            valip = ip(self.valgrid)
            bad = np.isnan(valip)
            pbad = np.sum(bad) / bad.size
            assert pbad == 0

            ip2 = ipol.RectGrid(self.grid2, self.grid, method=get_rect_method)
            valip2 = ip2(valip)
            bad = np.isnan(valip2)
            pbad = np.sum(bad) / bad.size
            assert abs(pbad - 0.75) < 0.1
            np.testing.assert_allclose(self.valgrid[~bad], valip2[~bad])

    def test_rect_bin(self):
        ip = ipol.RectBin(self.points, self.grid)
        valip = ip(self.valpoints)
        assert valip.shape == self.grid.shape[:-1]

        grid2 = self.grid2 + (-0.01, 0.01)
        ip = ipol.RectBin(grid2, self.grid)
        valip = ip(self.valgrid2)
        assert valip.shape == self.grid.shape[:-1]

        bad = np.isnan(valip)
        pbad = np.sum(bad) / bad.size
        assert abs(pbad - 0.75) < 0.1

        firstcell = self.valgrid2[0:2, 0:2]
        mean = np.mean(firstcell.ravel())
        np.testing.assert_allclose(mean, valip[0, 0])

        ip = ipol.RectBin(self.points, self.grid)
        ip(self.valpoints, statistic="median")

        ip = ipol.RectBin(self.points, self.grid)
        res0 = ip(self.valpoints, statistic="median")
        res0a = ip.binned_stats.statistic.copy()
        if ip.upper:
            res0a = np.flip(res0a, ip.ydim)
        res1 = ip(self.valpoints, statistic="median")

        np.testing.assert_allclose(res0, res1)
        np.testing.assert_allclose(res0a, res1)

    def test_QuadriArea(self):
        grid2 = self.grid2 + (-0.01, 0.01)
        ip = ipol.QuadriArea(grid2, self.grid)
        valgrid2 = self.valgrid2[1:, 1:]
        valip = ip(valgrid2)
        assert valip.shape == tuple([el - 1 for el in self.grid.shape[:-1]])

        bad = np.isnan(valip)
        pbad = np.sum(bad) / bad.size
        assert abs(pbad - 0.75) < 0.1

        firstcell = valgrid2[0:3, 0:3]
        weights = np.array(
            [
                [72 / 100, 9 / 10, 18 / 100],
                [8 / 10, 1, 2 / 10],
                [8 / 100, 1 / 10, 2 / 100],
            ]
        )
        ref = np.sum(np.multiply(firstcell, weights)) / np.sum(weights)
        np.testing.assert_allclose(ref, valip[0, 0])

    def test_IpolChain(self):
        ip1 = ipol.RectGrid(self.grid, self.points)
        ip2 = ipol.RectGrid(self.grid, self.points)
        ipol.IpolChain((ip1, ip2))
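The only difference between the "image_upper" and "image_lower" grids above is a flipped y axis: reversing y (and hence the row order) switches between an upper-left and a lower-left origin. A minimal standalone sketch of that convention switch (grid names here are illustrative, not part of the tested API):

import numpy as np

x = np.linspace(2, 12, 5)
y = np.linspace(55, 45, 3)   # upper-left origin: y decreases row by row

image_upper = np.stack(np.meshgrid(x, y), axis=-1)
image_lower = np.stack(np.meshgrid(x, np.flip(y)), axis=-1)

# Reversing the row order of one convention yields the other.
assert np.allclose(image_upper[::-1], image_lower)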
Example No. 41
print("--- running SNB ---")

call(["./snb", str(Tw)])

#Intensity

print("--- reading intensity ---")
data = np.loadtxt("I")


print("--- calculating flux ---")
# DOM
#
Ip = data[:,1]
Im = -np.flip(Ip,axis=0)

#flux

q = Ip + Im

plt.plot(x,q, marker = 'o')
plt.show()
plt.clf()


print("--- calculating radiative source term ---")


# source term
Example No. 42
    def transform(self, img, lbl):
        img = img[:, :, ::-1]  # reverse channel order (e.g. BGR -> RGB)
        img = img.astype(np.float64)
        lbl[lbl == 255] = 0    # map label 255 (commonly an ignore index) to 0

        #random scaleSizeCrop
        use_random_crop = True
        if (use_random_crop):
            if (self.split != 'val'):
                for attempt in range(100):
                    areas = img.shape[0] * img.shape[1]
                    target_area = random.uniform(
                        0.5,
                        1) * areas  #input:512 ,range(198,360)and resizeto 256

                    w = int(round(np.sqrt(target_area)))
                    h = int(round(np.sqrt(target_area)))

                    if w <= img.shape[1] and h <= img.shape[0]:
                        x1 = random.randint(0, img.shape[1] - w)
                        y1 = random.randint(0, img.shape[0] - h)

                        img = img[y1:y1 + h, x1:x1 + w]
                        lbl = lbl[y1:y1 + h, x1:x1 + w]
                        if (((img.shape[1], img.shape[0]) == (w, h))
                                and ((lbl.shape[1], lbl.shape[0]) == (w, h))):
                            break
                assert ((img.shape[1], img.shape[0]) == (w, h))
                assert ((lbl.shape[1], lbl.shape[0]) == (w, h))
            else:
                w, h = img.shape[1], img.shape[0]
                th, tw = self.img_size[0], self.img_size[1]
                x1 = int(round((w - tw) / 2.))
                y1 = int(round((h - th) / 2.))
                # crop to the target size (the original cropped with h, w,
                # which left the image uncropped)
                img = img[y1:y1 + th, x1:x1 + tw]
                lbl = lbl[y1:y1 + th, x1:x1 + tw]

        #random rotate
        if (random.random() < 0.5 and self.split != 'val'):
            angle = random.randint(-90, 90)
            img = rotate(img, angle, mode='symmetric', preserve_range=True)
            lbl = rotate(lbl,
                         angle,
                         mode='symmetric',
                         order=0,
                         preserve_range=True)

        #random vertically flip
        if (random.random() < 0.5 and self.split != 'val'):
            img = np.flip(img, axis=0)
            lbl = np.flip(lbl, axis=0)
            #print "vertically flip"

        #random horizontally flip
        if (random.random() < 0.5 and self.split != 'val'):
            img = np.flip(img, axis=1)
            lbl = np.flip(lbl, axis=1)
            #print "horizontally flip"

        img = np.array(
            Image.fromarray(np.uint8(img)).resize(
                (self.img_size[0], self.img_size[1])))
        img = img.astype(float) / 255.0
        img -= self.mean
        img = img / self.std
        # HWC -> CHW (channels first)
        img = img.transpose(2, 0, 1)

        lbl = lbl.astype(float)
        lbl = np.array(
            Image.fromarray(np.uint8(lbl)).resize(
                (self.img_size[0], self.img_size[1])))

        lbl = lbl.astype(int)
        img = torch.from_numpy(img).float()
        lbl = torch.from_numpy(lbl).long()
        return img, lbl
Example No. 43
def test_fft_kpq(nalpha):
    # Create regular grid.
    nmax = 2
    mesh = [2*nmax+1]*1
    grid, eigs = generate_fft_grid(mesh)
    
    qmax = 2*nmax
    qmesh = [2*qmax+1]*1
    qgrid, qeigs = generate_fft_grid(qmesh)

    # Create wavefunction
    nbasis = len(grid)
    
    numpy.random.seed(7)

    psi = get_random_wavefunction((nalpha,nalpha), nbasis)
    I = get_random_wavefunction((nalpha,nalpha), nbasis)

    # Select lowest energy states for trial
    trial = I[:,:nalpha].conj()
    trial.imag[:] = 0
    G, Gh = gab_mod(trial,psi[:,:nalpha])
    nqgrid = numpy.prod(qmesh)

    # Check by direct convolution f(q) = \sum_G Psi[G+Q] Gh[G].
    fq_direct = numpy.zeros(nqgrid, dtype=numpy.complex128)
    for iq, q in enumerate(qgrid):
        Gtrace_direct = 0
        # for a in range(nalpha):
        # compute \sum_G f_G(q)
        for i, k in enumerate(grid):
            kpq = k+q
            ikpq = lookup(kpq, grid)
            if ikpq is not None:
                Gtrace_direct += trial[ikpq,0] * Gh[0,i]
        fq_direct[iq] = Gtrace_direct

    # Check by fft convolve
    # Compare to fq
    fq_conv = numpy.zeros(nqgrid, dtype=numpy.complex128)
    trial_pq = trial[:,0].copy()
    Gh_pq = Gh[0,:].copy()

    fq_conv += nqgrid*convolve(Gh_pq, numpy.flip(trial_pq), mesh)
    fq_conv = numpy.flip(fq_conv)

    import scipy.signal
    fq_conv_sc = numpy.flip(scipy.signal.fftconvolve(
            Gh_pq,numpy.flip(trial_pq)).ravel())

    import matplotlib.pyplot as pl
    pl.plot(fq_conv, label='fft')
    pl.plot(fq_conv_sc, label='fft_scipy')
    pl.plot(fq_direct, label='direct')
    # pl.plot(fq, label='from_gf')
    pl.legend()
    pl.show()
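The direct sum being checked, f(q) = \sum_G Psi[G+q] Gh[G], is a cross-correlation, and the FFT branch evaluates it as a convolution with one input flipped: corr(a, b) equals conv(a, flip(b)) up to an index shift. A minimal NumPy-only sketch of that identity, independent of the helpers used above:

import numpy

rng = numpy.random.default_rng(7)
a = rng.standard_normal(5)
b = rng.standard_normal(5)

# Direct cross-correlation f[q] = sum_k a[k + q] * b[k], for every shift q
# with any overlap.
n = len(b)
f_direct = numpy.array([sum(a[k + q] * b[k]
                            for k in range(n) if 0 <= k + q < len(a))
                        for q in range(-(n - 1), len(a))])

# The same sequence as a convolution with the flipped second input:
# conv(a, flip(b))[q + n - 1] == f[q].
f_conv = numpy.convolve(a, numpy.flip(b))

assert numpy.allclose(f_direct, f_conv)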
    # The opening of this if-statement is truncated in the source snippet;
    # a plausible reconstruction of the missing condition:
    if matlab_val is None:
        mm.loadData()
    else:
        mm.allSumAmp_mes = matlab_val
    
    ''' Create computational graph'''
    mm.create_graph(obj, if_xla=0)

'''Read object from file'''
matlab_val_file = '/Users/bene/Dropbox/Dokumente/Promotion/PROJECTS/multiSCAT/PYTHON/muScat/VascuSynth/obj.mat'
matlab_val = data.import_realdata_mat(filename = matlab_val_file, is_complex = False)
# convert the object to experimental parameters
obj =  (np.float32(matlab_val)/255)*(mm.nImm-mm.nEmbb)+mm.nEmbb


''' visualize Results'''
mm.eval_graph(obj = np.flip(obj,2)) # result will be stored in mm.allSumAmp

'''Add Regularizers'''
mm.regularizer(if_tvreg=True, if_posreg=True)

'''define Cost-function'''
mm.loss(loss_type = 4)

''' initialize variables '''
mm.compileGraph()

''' write the results to disk'''
mm.saveResults()

if(mm.if_optimize):
    # feed tensor externally
    pass  # (body truncated in the source)

def mirror(image):
    return np.flip(image, -1).copy()
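The trailing .copy() in mirror matters: np.flip returns a reversed view with a negative stride, not a new array, and some consumers (torch.from_numpy, for one) reject negative strides. A quick illustration:

import numpy as np

image = np.arange(6, dtype=np.float32).reshape(2, 3)

flipped = np.flip(image, -1)
assert flipped.base is image           # a view onto the original buffer
assert flipped.strides[-1] < 0         # reversed via a negative stride

mirrored = np.flip(image, -1).copy()   # contiguous copy, safe to hand off
assert mirrored.base is None
assert mirrored.strides[-1] > 0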
Example No. 46
	#scipy.io.wavfile.write(filename_out, 48000, audio_data_absolute)
	#filename_out = os.path.join(args.output, input_data["audio_filename"] + "_shape1.wav")
	#scipy.io.wavfile.write(filename_out, 48000, shape)
	
	windows = int(48000 * 0.02)
	count_all = 0
	count = 0
	for sample_audio in shape:
		count_all += 1
		if sample_audio == set_value:
			count = windows
			continue
		count -= 1
		if count >= 0:
			shape[count_all-1] = set_value
	shape = np.flip(shape)
	count_all = 0
	count = 0
	for sample_audio in shape:
		count_all += 1
		if sample_audio == set_value:
			count = windows
			continue
		count -= 1
		if count >= 0:
			shape[count_all-1] = set_value
		else:
			shape[count_all-1] = 0
	shape = np.flip(shape)
	
	filename_out = os.path.join(args.output, input_data["audio_filename"] + "_shape.wav")
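Both passes above run the same causal operation, once forward and once on the flipped array, so the hold window extends on both sides of each marked sample; flip / process / flip back is the standard way to apply a forward-only pass in the reverse direction. A compact sketch of the pattern with a boolean mask (window length and data are illustrative):

import numpy as np

def extend_forward(mask, window):
    # Causal pass: hold the marker "on" for `window` samples after each hit.
    out = mask.copy()
    countdown = 0
    for i, hit in enumerate(mask):
        countdown = window if hit else countdown - 1
        if countdown > 0:
            out[i] = True
    return out

mask = np.array([0, 0, 1, 0, 0, 0, 1, 0], dtype=bool)

forward = extend_forward(mask, window=2)
# Flip, run the same causal pass again, flip back: now the envelope also
# extends *before* each original hit.
both_ways = np.flip(extend_forward(np.flip(forward), window=2))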
Example No. 47
def summary_plot(shap_values,
                 id,
                 features=None,
                 feature_names=None,
                 max_display=None,
                 plot_type="dot",
                 color=None,
                 axis_color="#333333",
                 title=None,
                 alpha=1,
                 show=True,
                 sort=True,
                 color_bar=True,
                 auto_size_plot=True,
                 layered_violin_max_num_bins=20,
                 class_names=None,
                 xrange=None):
    """Create a SHAP summary plot, colored by feature values when they are provided.

    Parameters
    ----------
    shap_values : numpy.array
        Matrix of SHAP values (# samples x # features)

    features : numpy.array or pandas.DataFrame or list
        Matrix of feature values (# samples x # features) or a feature_names list as shorthand

    feature_names : list
        Names of the features (length # features)

    max_display : int
        How many top features to include in the plot (default is 20, or 7 for interaction plots)

    plot_type : "dot" (default) or "violin"
        What type of summary plot to produce
    """
    pl.figure(id)
    multi_class = False
    if isinstance(shap_values, list):
        multi_class = True
        plot_type = "bar"  # only type supported for now
    else:
        assert len(
            shap_values.shape
        ) != 1, "Summary plots need a matrix of shap_values, not a vector."

    # default color:
    if color is None:
        if plot_type == 'layered_violin':
            color = "coolwarm"
        elif multi_class:
            color = lambda i: colors.red_blue_circle(i / len(shap_values))
        else:
            color = colors.blue_rgb

    # convert from a DataFrame or other types
    if str(type(features)) == "<class 'pandas.core.frame.DataFrame'>":
        if feature_names is None:
            feature_names = features.columns
        features = features.values
    elif isinstance(features, list):
        if feature_names is None:
            feature_names = features
        features = None
    elif (features is not None) and len(
            features.shape) == 1 and feature_names is None:
        feature_names = features
        features = None

    num_features = (shap_values[0].shape[1]
                    if multi_class else shap_values.shape[1])

    if feature_names is None:
        feature_names = np.array(
            [labels['FEATURE'] % str(i) for i in range(num_features)])

    # plotting SHAP interaction values
    if not multi_class and len(shap_values.shape) == 3:
        if max_display is None:
            max_display = 7
        else:
            max_display = min(len(feature_names), max_display)

        sort_inds = np.argsort(-np.abs(shap_values.sum(1)).sum(0))

        # get plotting limits
        delta = 1.0 / (shap_values.shape[1]**2)
        slow = np.nanpercentile(shap_values, delta)
        shigh = np.nanpercentile(shap_values, 100 - delta)
        v = max(abs(slow), abs(shigh))
        slow = -v
        shigh = v
        pl.figure(figsize=(1.5 * max_display + 1, 0.8 * max_display + 1))
        pl.subplot(1, max_display, 1)
        proj_shap_values = shap_values[:, sort_inds[0], sort_inds]
        proj_shap_values[:,
                         1:] *= 2  # because off diag effects are split in half
        summary_plot(proj_shap_values,
                     features[:, sort_inds] if features is not None else None,
                     feature_names=feature_names[sort_inds],
                     sort=False,
                     show=False,
                     color_bar=False,
                     auto_size_plot=False,
                     max_display=max_display,
                     id=id)
        pl.xlim((slow, shigh))
        pl.xlabel("")
        title_length_limit = 11
        pl.title(shorten_text(feature_names[sort_inds[0]], title_length_limit))
        for i in range(1, min(len(sort_inds), max_display)):
            ind = sort_inds[i]
            pl.subplot(1, max_display, i + 1)
            proj_shap_values = shap_values[:, ind, sort_inds]
            proj_shap_values *= 2
            proj_shap_values[:,
                             i] /= 2  # because only off diag effects are split in half
            summary_plot(proj_shap_values,
                         features[:,
                                  sort_inds] if features is not None else None,
                         sort=False,
                         feature_names=["" for i in range(len(feature_names))],
                         show=False,
                         color_bar=False,
                         auto_size_plot=False,
                         max_display=max_display,
                         id=id)
            pl.xlim((slow, shigh))
            pl.xlabel("")
            if i == min(len(sort_inds), max_display) // 2:
                pl.xlabel(labels['INTERACTION_VALUE'])
            pl.title(shorten_text(feature_names[ind], title_length_limit))
        #pl.tight_layout(pad=0, w_pad=0, h_pad=0.0)
        pl.subplots_adjust(hspace=0, wspace=0.1)
        if show:
            pl.show()
        return

    if max_display is None:
        max_display = 20

    if sort:
        # order features by the sum of their effect magnitudes
        if multi_class:
            feature_order = np.argsort(
                np.sum(np.mean(np.abs(shap_values), axis=0), axis=0))
        else:
            feature_order = np.argsort(np.sum(np.abs(shap_values), axis=0))
        feature_order = feature_order[-min(max_display, len(feature_order)):]
    else:
        feature_order = np.flip(np.arange(min(max_display, num_features)), 0)

    row_height = 0.4
    if auto_size_plot:
        pl.gcf().set_size_inches(8, len(feature_order) * row_height + 1.5)
    pl.axvline(x=0, color="#999999", zorder=-1)

    if plot_type == "dot":
        for pos, i in enumerate(feature_order):
            pl.axhline(y=pos,
                       color="#cccccc",
                       lw=0.5,
                       dashes=(1, 5),
                       zorder=-1)
            shaps = shap_values[:, i]
            values = None if features is None else features[:, i]
            inds = np.arange(len(shaps))
            np.random.shuffle(inds)
            if values is not None:
                values = values[inds]
            shaps = shaps[inds]
            colored_feature = True
            try:
                values = np.array(
                    values, dtype=np.float64)  # make sure this can be numeric
            except:
                colored_feature = False
            N = len(shaps)
            # hspacing = (np.max(shaps) - np.min(shaps)) / 200
            # curr_bin = []
            nbins = 100
            quant = np.round(nbins * (shaps - np.min(shaps)) /
                             (np.max(shaps) - np.min(shaps) + 1e-8))
            inds = np.argsort(quant + np.random.randn(N) * 1e-6)
            layer = 0
            last_bin = -1
            ys = np.zeros(N)
            for ind in inds:
                if quant[ind] != last_bin:
                    layer = 0
                ys[ind] = np.ceil(layer / 2) * ((layer % 2) * 2 - 1)
                layer += 1
                last_bin = quant[ind]
            ys *= 0.9 * (row_height / np.max(ys + 1))

            if features is not None and colored_feature:
                # trim the color range, but prevent the color range from collapsing
                vmin = np.nanpercentile(values, 5)
                vmax = np.nanpercentile(values, 95)
                if vmin == vmax:
                    vmin = np.nanpercentile(values, 1)
                    vmax = np.nanpercentile(values, 99)
                    if vmin == vmax:
                        vmin = np.min(values)
                        vmax = np.max(values)

                assert features.shape[0] == len(
                    shaps
                ), "Feature and SHAP matrices must have the same number of rows!"

                # plot the nan values in the interaction feature as grey
                nan_mask = np.isnan(values)
                pl.scatter(shaps[nan_mask],
                           pos + ys[nan_mask],
                           color="#777777",
                           vmin=vmin,
                           vmax=vmax,
                           s=16,
                           alpha=alpha,
                           linewidth=0,
                           zorder=3,
                           rasterized=len(shaps) > 500)

                # plot the non-nan values colored by the trimmed feature value
                cvals = values[np.invert(nan_mask)].astype(np.float64)
                cvals_imp = cvals.copy()
                cvals_imp[np.isnan(cvals)] = (vmin + vmax) / 2.0
                cvals[cvals_imp > vmax] = vmax
                cvals[cvals_imp < vmin] = vmin
                pl.scatter(shaps[np.invert(nan_mask)],
                           pos + ys[np.invert(nan_mask)],
                           cmap=red_green,
                           vmin=vmin,
                           vmax=vmax,
                           s=16,
                           c=cvals,
                           alpha=alpha,
                           linewidth=0,
                           zorder=3,
                           rasterized=len(shaps) > 500)
            else:

                pl.scatter(shaps,
                           pos + ys,
                           s=16,
                           alpha=alpha,
                           linewidth=0,
                           zorder=3,
                           color=color if colored_feature else "#777777",
                           rasterized=len(shaps) > 500)

    elif plot_type == "violin":
        for pos, i in enumerate(feature_order):
            pl.axhline(y=pos,
                       color="#cccccc",
                       lw=0.5,
                       dashes=(1, 5),
                       zorder=-1)

        if features is not None:
            global_low = np.nanpercentile(
                shap_values[:, :len(feature_names)].flatten(), 1)
            global_high = np.nanpercentile(
                shap_values[:, :len(feature_names)].flatten(), 99)
            for pos, i in enumerate(feature_order):
                shaps = shap_values[:, i]
                shap_min, shap_max = np.min(shaps), np.max(shaps)
                rng = shap_max - shap_min
                xs = np.linspace(
                    np.min(shaps) - rng * 0.2,
                    np.max(shaps) + rng * 0.2, 100)
                if np.std(shaps) < (global_high - global_low) / 100:
                    ds = gaussian_kde(shaps + np.random.randn(len(shaps)) *
                                      (global_high - global_low) / 100)(xs)
                else:
                    ds = gaussian_kde(shaps)(xs)
                ds /= np.max(ds) * 3

                values = features[:, i]
                window_size = max(10, len(values) // 20)
                smooth_values = np.zeros(len(xs) - 1)
                sort_inds = np.argsort(shaps)
                trailing_pos = 0
                leading_pos = 0
                running_sum = 0
                back_fill = 0
                for j in range(len(xs) - 1):

                    while leading_pos < len(shaps) and xs[j] >= shaps[
                            sort_inds[leading_pos]]:
                        running_sum += values[sort_inds[leading_pos]]
                        leading_pos += 1
                        if leading_pos - trailing_pos > 20:
                            running_sum -= values[sort_inds[trailing_pos]]
                            trailing_pos += 1
                    if leading_pos - trailing_pos > 0:
                        smooth_values[j] = running_sum / (leading_pos -
                                                          trailing_pos)
                        for k in range(back_fill):
                            smooth_values[j - k - 1] = smooth_values[j]
                    else:
                        back_fill += 1

                vmin = np.nanpercentile(values, 5)
                vmax = np.nanpercentile(values, 95)
                if vmin == vmax:
                    vmin = np.nanpercentile(values, 1)
                    vmax = np.nanpercentile(values, 99)
                    if vmin == vmax:
                        vmin = np.min(values)
                        vmax = np.max(values)
                pl.scatter(shaps,
                           np.ones(shap_values.shape[0]) * pos,
                           s=9,
                           cmap=red_green,
                           vmin=vmin,
                           vmax=vmax,
                           c=values,
                           alpha=alpha,
                           linewidth=0,
                           zorder=1)
                # smooth_values -= nxp.nanpercentile(smooth_values, 5)
                # smooth_values /= np.nanpercentile(smooth_values, 95)
                smooth_values -= vmin
                if vmax - vmin > 0:
                    smooth_values /= vmax - vmin
                for i in range(len(xs) - 1):
                    if ds[i] > 0.05 or ds[i + 1] > 0.05:
                        pl.fill_between([xs[i], xs[i + 1]],
                                        [pos + ds[i], pos + ds[i + 1]],
                                        [pos - ds[i], pos - ds[i + 1]],
                                        color=red_green(smooth_values[i]),
                                        zorder=2)

        else:
            parts = pl.violinplot(shap_values[:, feature_order],
                                  range(len(feature_order)),
                                  points=200,
                                  vert=False,
                                  widths=0.7,
                                  showmeans=False,
                                  showextrema=False,
                                  showmedians=False)

            for pc in parts['bodies']:
                pc.set_facecolor(color)
                pc.set_edgecolor('none')
                pc.set_alpha(alpha)

    elif plot_type == "layered_violin":  # courtesy of @kodonnell
        num_x_points = 200
        bins = np.linspace(
            0, features.shape[0], layered_violin_max_num_bins + 1
        ).round(0).astype(
            'int')  # the indices of the feature data corresponding to each bin
        shap_min, shap_max = np.min(shap_values), np.max(shap_values)
        x_points = np.linspace(shap_min, shap_max, num_x_points)

        # loop through each feature and plot:
        for pos, ind in enumerate(feature_order):
            # decide how to handle: if #unique < layered_violin_max_num_bins then split by unique value, otherwise use bins/percentiles.
            # to keep simpler code, in the case of uniques, we just adjust the bins to align with the unique counts.
            feature = features[:, ind]
            unique, counts = np.unique(feature, return_counts=True)
            if unique.shape[0] <= layered_violin_max_num_bins:
                order = np.argsort(unique)
                thesebins = np.cumsum(counts[order])
                thesebins = np.insert(thesebins, 0, 0)
            else:
                thesebins = bins
            nbins = thesebins.shape[0] - 1
            # order the feature data so we can apply percentiling
            order = np.argsort(feature)
            # x axis is located at y0 = pos, with pos being there for offset
            y0 = np.ones(num_x_points) * pos
            # calculate kdes:
            ys = np.zeros((nbins, num_x_points))
            for i in range(nbins):
                # get shap values in this bin:
                shaps = shap_values[order[thesebins[i]:thesebins[i + 1]], ind]
                # if there's only one element, then we can't
                if shaps.shape[0] == 1:
                    warnings.warn(
                        "not enough data in bin #%d for feature %s, so it'll be ignored. Try increasing the number of records to plot."
                        % (i, feature_names[ind]))
                    # to ignore it, just set it to the previous y-values (so the area between them will be zero). Note that ys is already 0, so there's
                    # nothing to do if i == 0
                    if i > 0:
                        ys[i, :] = ys[i - 1, :]
                    continue
                # save kde of them: note that we add a tiny bit of gaussian noise to avoid singular matrix errors
                ys[i, :] = gaussian_kde(shaps + np.random.normal(
                    loc=0, scale=0.001, size=shaps.shape[0]))(x_points)
                # scale it up so that the 'size' of each y represents the size of the bin. For continuous data this will
                # do nothing, but when we've gone with the unique option, this will matter - e.g. if 99% are male and 1%
                # female, we want the 1% to appear a lot smaller.
                size = thesebins[i + 1] - thesebins[i]
                bin_size_if_even = features.shape[0] / nbins
                relative_bin_size = size / bin_size_if_even
                ys[i, :] *= relative_bin_size
            # now plot 'em. We don't plot the individual strips, as this can leave whitespace between them.
            # instead, we plot the full kde, then remove outer strip and plot over it, etc., to ensure no
            # whitespace
            ys = np.cumsum(ys, axis=0)
            width = 0.8
            scale = ys.max(
            ) * 2 / width  # 2 is here as we plot both sides of x axis
            for i in range(nbins - 1, -1, -1):
                y = ys[i, :] / scale
                c = pl.get_cmap(color)(
                    i / (nbins - 1)
                ) if color in pl.cm.datad else color  # if color is a cmap, use it, otherwise use a color
                pl.fill_between(x_points, pos - y, pos + y, facecolor=c)
        pl.xlim(shap_min, shap_max)

    elif not multi_class and plot_type == "bar":
        feature_inds = feature_order[:max_display]
        y_pos = np.arange(len(feature_inds))
        global_shap_values = np.abs(shap_values).mean(0)
        if xrange is None:
            pl.barh(y_pos,
                    global_shap_values[feature_inds],
                    0.7,
                    align='center',
                    color=color,
                    left=np.amin(shap_values) * 1.1 - np.max(global_shap_values))
        else:
            pl.barh(y_pos,
                    global_shap_values[feature_inds],
                    0.7,
                    align='center',
                    color=color,
                    left=xrange[0])
        pl.yticks(y_pos, fontsize=13)
        pl.gca().set_yticklabels([feature_names[i] for i in feature_inds])

    elif multi_class and plot_type == "bar":
        if class_names is None:
            class_names = ["Class " + str(i) for i in range(len(shap_values))]
        feature_inds = feature_order[:max_display]
        y_pos = np.arange(len(feature_inds))
        left_pos = np.zeros(len(feature_inds))

        class_inds = np.argsort(
            [-np.abs(shap_values[i]).mean() for i in range(len(shap_values))])
        for i, ind in enumerate(class_inds):
            global_shap_values = np.abs(shap_values[ind]).mean(0)
            pl.barh(y_pos,
                    global_shap_values[feature_inds],
                    0.7,
                    left=left_pos,
                    align='center',
                    color=color(i),
                    label=class_names[ind])
            left_pos += global_shap_values[feature_inds]
        pl.yticks(y_pos, fontsize=13)
        pl.gca().set_yticklabels([feature_names[i] for i in feature_inds])
        pl.legend(frameon=False, fontsize=12)

    # draw the color bar
    if color_bar and features is not None and plot_type != "bar" and \
            (plot_type != "layered_violin" or color in pl.cm.datad):
        import matplotlib.cm as cm
        m = cm.ScalarMappable(cmap=red_green if plot_type != "layered_violin"
                              else pl.get_cmap(color))
        m.set_array([0, 1])
        cb = pl.colorbar(m, ticks=[0, 1], aspect=1000)
        cb.set_ticklabels(
            [labels['FEATURE_VALUE_LOW'], labels['FEATURE_VALUE_HIGH']])
        cb.set_label(labels['FEATURE_VALUE'], size=12, labelpad=0)
        cb.ax.tick_params(labelsize=11, length=0)
        cb.set_alpha(1)
        cb.outline.set_visible(False)
        bbox = cb.ax.get_window_extent().transformed(
            pl.gcf().dpi_scale_trans.inverted())
        cb.ax.set_aspect((bbox.height - 0.9) * 20)
        # cb.draw_all()

    pl.gca().xaxis.set_ticks_position('bottom')
    pl.gca().yaxis.set_ticks_position('none')
    pl.gca().spines['right'].set_visible(False)
    pl.gca().spines['top'].set_visible(False)
    pl.gca().spines['left'].set_visible(False)
    if xrange is not None:
        pl.xlim(xrange[0], xrange[1])
    pl.gca().tick_params(color=axis_color, labelcolor=axis_color)
    pl.yticks(range(len(feature_order)),
              [feature_names[i] for i in feature_order],
              fontsize=13)
    if plot_type != "bar":
        pl.gca().tick_params('y', length=20, width=0.5, which='major')
    pl.gca().tick_params('x', labelsize=11)
    pl.ylim(-1, len(feature_order))
    if plot_type == "bar":
        pl.xlabel(labels['GLOBAL_VALUE'], fontsize=13)
    else:
        pl.xlabel(labels['VALUE'], fontsize=13)
    if show:
        pl.get_current_fig_manager().window.showMaximized()
        pl.title(title, fontsize=20)
        pl.xlabel('Normalised SHAP value')
        pl.figure(id)
        pl.show()
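A minimal invocation of the function above on synthetic data; the id argument selects the matplotlib figure, and module-level helpers such as labels, colors, red_green, and shorten_text are assumed to be defined alongside it:

import numpy as np

rng = np.random.default_rng(0)
features = rng.standard_normal((200, 5))
shap_values = features * rng.uniform(0.1, 1.0, size=5)   # fake attributions

summary_plot(shap_values,
             id=1,
             features=features,
             feature_names=["f%d" % i for i in range(5)],
             max_display=5,
             plot_type="dot",
             show=False)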
Example No. 48
index = 0
for i in reversed(range(len(y))):
    if y[i] == labels[index,0] and not labels[index,1]:
        labels[index,1] = 1
        
        X_template[index,:,:] = X_new[i,:,:]
        X_new = np.delete(X_new, i, axis=0)
        
        y_template[index] = y[i]
        y = np.delete(y, i, axis=0)
        
        index += 1
        if index == num_labels:
            break
X_template = np.flip(X_template, axis=0)
y_template = np.flip(y_template, axis=0)        

#Classify in least distance
y_pred = np.zeros((len(X_new)))
    
for s in range(len(X_new)): #For all values of X
    
    distances = np.empty((num_labels))
    
    inter_dists = np.zeros((X_template.shape[0], X_template.shape[1])) #Comparisons for classification   
    
    for i in range(inter_dists.shape[0]): #For all labels
        for j in range(inter_dists.shape[1]): #For all frequency sequences
            dist, _ = fastdtw(X_template[i,j,:], X_new[s,j,:]) #Compute distance
            
from scipy.optimize import curve_fit
from scipy.signal import find_peaks
import time

date = np.loadtxt(
    'C:\\Users\\User\\Desktop\\Covid-19 Data\\Countries File\\United_Kingdom.txt',
    dtype=str,
    usecols=(0, ))

daily_cases = np.loadtxt(
    'C:\\Users\\User\\Desktop\\Covid-19 Data\\Countries File\\United_Kingdom.txt',
    usecols=(2, ))
daily_deaths = np.loadtxt(
    'C:\\Users\\User\\Desktop\\Covid-19 Data\\Countries File\\United_Kingdom.txt',
    usecols=(3, ))
flip_cases = np.flip(daily_cases)
flip_deaths = np.flip(daily_deaths)
flip_date = np.flip(date)
x_values = np.arange(len(date))


def func(x, a, b, c):
    return a * np.exp((-b * x)) + c


popt, pcov = curve_fit(func, x_values, flip_cases, p0=[1, 1e-6, 1])
perr = np.sqrt(np.diag(pcov))

print(popt)
fitting_values = func(x_values, popt[0], popt[1], popt[2])
Example No. 50
else:
    raise ValueError('Unrecognized measure choice')

average_ne = ('mean', 'max')
#%% Get ne, z-profile
ne_avg_r = {}; ne_avg_r_cc = {}; t = {}; z = {}; z_cc = {}; tt = {}; zz = {}
for kind in average_ne:
    z_cc[kind], z[kind], ne_avg_r[kind], t[kind] = map(np.array, apl.ne_avg_over_r(sim[kind], pluto_nframes, kind,
                                                                                   ret_z_cell_borders=True))
    # I average ne over the time cells, so that I get a quantity that is cell centered
    # also on the time grid (before it was vertex centered on the time grid and cell centered
    # on the z grid)
    ne_avg_r_cc[kind] = 0.5*(ne_avg_r[kind][1:,:]+ne_avg_r[kind][:-1,:])

    # Maybe the code would work even without flipping, but I do so, to make it more robust
    z[kind] = np.concatenate((np.flip(-z[kind], axis=0), z[kind][1:]))
    ne_avg_r_cc[kind] = np.concatenate((np.flip(ne_avg_r_cc[kind], axis=1), ne_avg_r_cc[kind]), axis=1)

    tt[kind],zz[kind] = np.meshgrid(t[kind],z[kind])
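The concatenations above mirror the on-axis profile about z = 0: the z grid gains a negated, flipped copy (dropping the duplicated zero sample) and the density array gains a flipped copy along the matching axis. The construction in isolation:

import numpy as np

z = np.linspace(0.0, 1.0, 5)    # half-profile, z >= 0 with z[0] == 0
ne = z**2 + 1.0                 # any axisymmetric quantity

z_full = np.concatenate((np.flip(-z)[:-1], z))    # -1 ... -0.25, 0, 0.25 ... 1
ne_full = np.concatenate((np.flip(ne)[:-1], ne))

assert np.allclose(ne_full, np.flip(ne_full))     # symmetric about z = 0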

#%% Read picture
gs = gridspec.GridSpec(6, 2,
                       width_ratios=[30, 2],
                       height_ratios=[1, 1, 1, 1, 1, 1]
                       )
fig = plt.figure(figsize=(5.5,5.5))
ax_sim = [[],[]]
ax_sim[0]  = plt.subplot(gs[2:4,0])
ax_sim[1]  = plt.subplot(gs[4:,0], sharex = ax_sim[0])
ax_meas = plt.subplot(gs[:2,0], sharex = ax_sim[0])
ax_cb = plt.subplot(gs[1:-1,1])
Example No. 51
    def from_kinship(cls, y, x, k, p_path=None, overwrite=False):
        r"""Initializes a model from :math:`y`, :math:`X`, and :math:`K`.

        Examples
        --------
        >>> from hail.stats import LinearMixedModel
        >>> y = np.array([0.0, 1.0, 8.0, 9.0])
        >>> x = np.array([[1.0, 0.0],
        ...               [1.0, 2.0],
        ...               [1.0, 1.0],
        ...               [1.0, 4.0]])
        >>> k = np.array([[ 1.        , -0.8727875 ,  0.96397335,  0.94512946],
        ...               [-0.8727875 ,  1.        , -0.93036112, -0.97320323],
        ...               [ 0.96397335, -0.93036112,  1.        ,  0.98294169],
        ...               [ 0.94512946, -0.97320323,  0.98294169,  1.        ]])
        >>> model, p = LinearMixedModel.from_kinship(y, x, k)
        >>> model.fit()
        >>> model.h_sq  # doctest: +NOTEST
        0.2525148830695317

        >>> model.s  # doctest: +NOTEST
        array([3.83501295, 0.13540343, 0.02454114, 0.00504248])

        Truncate to a rank :math:`r=2` model:

        >>> r = 2
        >>> s_r = model.s[:r]
        >>> p_r = p[:r, :]
        >>> model_r = LinearMixedModel(p_r @ y, p_r @ x, s_r, y, x)
        >>> model_r.fit()
        >>> model_r.h_sq  # doctest: +NOTEST
        0.25193197591429695

        Notes
        -----
        This method eigendecomposes :math:`K = P^T S P` on the master and
        returns ``LinearMixedModel(p @ y, p @ x, s)`` and ``p``.

        The performance of eigendecomposition depends critically on the
        number of master cores and the NumPy / SciPy configuration, viewable
        with ``np.show_config()``. For Intel machines, we recommend installing
        the `MKL <https://anaconda.org/anaconda/mkl>`__ package for Anaconda, as
        is done by `cloudtools <https://github.com/Nealelab/cloudtools>`__.

        `k` must be positive semi-definite; symmetry is not checked as only the
        lower triangle is used.

        Parameters
        ----------
        y: :class:`ndarray`
            :math:`n` vector of observations.
        x: :class:`ndarray`
            :math:`n \times p` matrix of fixed effects.
        k: :class:`ndarray`
            :math:`n \times n` positive semi-definite kernel :math:`K`.
        p_path: :obj:`str`, optional
            Path at which to write :math:`P` as a block matrix.
        overwrite: :obj:`bool`
            If ``True``, overwrite an existing file at `p_path`.

        Returns
        -------
        model: :class:`LinearMixedModel`
            Model constructed from :math:`y`, :math:`X`, and :math:`K`.
        p: :class:`ndarray`
            Matrix :math:`P` whose rows are the eigenvectors of :math:`K`.
        """
        _check_dims(y, "y", 1)
        _check_dims(x, "x", 2)
        _check_dims(k, "k", 2)

        n = k.shape[0]
        if k.shape[1] != n:
            raise ValueError("from_kinship: 'k' must be a square matrix")
        if y.shape[0] != n:
            raise ValueError("from_kinship: 'y' and 'k' must have the same "
                             "number of rows")
        if x.shape[0] != n:
            raise ValueError("from_kinship: 'x' and 'k' must have the same "
                             "number of rows")

        s, u = hl.linalg._eigh(k)
        if s[0] < -1e-12 * s[-1]:
            raise Exception("from_kinship: smallest eigenvalue of 'k' is "
                            f"negative: {s[0]}")

        # flip eigenvalues to descending order
        s = np.flip(s, axis=0)
        u = np.fliplr(u)
        p = u.T
        if p_path:
            BlockMatrix.from_numpy(p).write(p_path, overwrite=overwrite)

        model = LinearMixedModel(p @ y, p @ x, s, p_path=p_path)
        return model, p
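The flip/fliplr pair converts the ascending output of a symmetric eigensolver to the descending order the model expects while keeping each eigenvalue paired with its eigenvector. A NumPy-only check of that invariant:

import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((4, 4))
k = a @ a.T                            # symmetric PSD kernel

s, u = np.linalg.eigh(k)               # ascending eigenvalues
s = np.flip(s, axis=0)                 # descending
u = np.fliplr(u)                       # reorder columns to match
p = u.T

assert np.all(np.diff(s) <= 0)
np.testing.assert_allclose(p.T @ np.diag(s) @ p, k, atol=1e-10)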
Example No. 52
    CP = CP + V
    CF = func(CP)
    LBF1 = np.maximum(CF, LBF)
    GBF = np.max(LBF1)
    GBP = LBP[np.where(LBF1 == GBF)]
    for i in range(n):
        if LBF1[i] == CF[i]:
            LBP[i] = CP[i]  # the current position achieved the new local best
        # otherwise keep the previous local-best position

s_list = np.zeros((n, 2))
for i, val in enumerate(sum_1 * .1):
    s_list[i] = [val, GBP]

par_list = np.flip(
    cosine_similarity(s_list, coords).diagonal().argsort()[-25:-1])
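argsort sorts ascending, so slicing the tail and flipping it yields the indices of the largest values in descending order; the [-25:-1] above keeps the 2nd through 25th highest similarities. The pattern in miniature:

import numpy as np

scores = np.array([0.2, 0.9, 0.5, 0.7, 0.1])

# Indices of the top-3 scores, best first.
top3 = np.flip(scores.argsort()[-3:])
assert top3.tolist() == [1, 3, 2]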
"""print("Agglomerative")
sum = 0
a_list = np.zeros((len(par_list),2))
for i in enumerate(par_list):
    att = np.where(b_vec[i[1]] == 1)
    att=att[0]
    col = []
    for j in att:
        col.append(colm[j-1])
    X_new = X.drop(col,axis=1)
    clustering = AgglomerativeClustering().fit(X_new)
    y_pred = clustering.labels_
    a_list[i[0]][0] = accuracy_score(y_pred,y)
    a_list[i[0]][1] = X_new.shape[1]
    sum += accuracy_score(y_pred, y)
"""
# testing with VTK tools
from vtk.numpy_interface import dataset_adapter as dsa
test_array = dsa.numpyTovtkDataArray(nifti.T)
points = vtk.vtkPoints()
points.SetData(test_array)

# saving the volumetric data to a VTK file
nifti_volumetric_path = os.path.join(origin_path,
                                     "new-data/id1090_timestep.nii")

nii_vol = nib.load(nifti_volumetric_path)

data_vol_3d = np.asanyarray(nii_vol.dataobj)  # get_data() was removed in nibabel >= 5
pixdims = nii_vol.header['pixdim'][1:4]

data_vol_3d = np.flip(data_vol_3d, axis=2)
data_vol_3d = np.flip(data_vol_3d, axis=1)
#data_vol_3d = np.flip(data_vol_3d, axis=0)

#data_vol_3d_test = dsa.numpyTovtkDataArray(data_vol_3d)
#data_vol_1d = np.array(np.nonzero(data_vol_3d), dtype=np.float32)

# order F seems to give better results
dataImporter = vtk.vtkImageImport()
data_string = data_vol_3d.flatten(order='F').tobytes()
dataImporter.CopyImportVoidPointer(data_string, len(data_string))

dataImporter.SetDataScalarTypeToShort()
dataImporter.SetNumberOfScalarComponents(1)

s = data_vol_3d.shape
Example No. 54
def live_application(capture, arg):
    pygame.init()
    display = pygame.display.set_mode((640, 480))
    pygame.display.set_caption('Real Time Hand Recon')

    dd = pickle.load(open("MANO_RIGHT.pkl", 'rb'), encoding='latin1')
    face = np.array(dd['f'])

    model = HandNet()
    model = model.to(device)
    checkpoint_io = CheckpointIO('.', model=model)
    load_dict = checkpoint_io.load('checkpoints/model.pt')
    model.eval()

    renderer = utils.MeshRenderer(face, img_size=[640, 480])

    # A detour through torch is unnecessary here; build the (1, 3, 3)
    # intrinsics array directly in NumPy.
    o_intr = np.array([
        [arg.fx, 0.0, arg.cx],
        [0.0, arg.fy, arg.cy],
        [0.0, 0.0, 1.0],
    ], dtype=np.float32)[np.newaxis]

    o_camparam = np.zeros((4))
    o_camparam[0] = o_intr[0, 0, 0]
    o_camparam[1] = o_intr[0, 1, 1]
    o_camparam[2] = o_intr[0, 0, 2]
    o_camparam[3] = o_intr[0, 1, 2]

    gr = jit(grad(residuals))
    lr = 0.03
    opt_init, opt_update, get_params = optimizers.adam(lr, b1=0.5, b2=0.5)
    opt_init = jit(opt_init)
    opt_update = jit(opt_update)
    get_params = jit(get_params)
    x_reg = np.ones((10, )) * 240
    y_reg = np.ones((10, )) * 240
    s_reg = np.ones((10, )) * 240
    weight = np.array([0, 0, 0, 0, 0, 0, 0, 0.1, 0.2, 0.7])
    i = 0
    x = 240
    y = 320
    scale = 256
    with torch.no_grad():
        while True:
            i = i + 1
            img = capture.read()
            if img is None:
                continue
            frame = img.copy()
            vmin = max(0, y - scale // 2)
            vmin_p = max(scale // 2 - y, 0)
            umin = max(0, x - scale // 2)
            umin_p = max(scale // 2 - x, 0)
            vmax = min(640, y + scale // 2)
            vmax_p = max(scale // 2 + y - 640, 0)
            umax = min(480, x + scale // 2)
            umax_p = max(scale // 2 + x - 480, 0)
            img = img[int(umin):int(umax), int(vmin):int(vmax), :]
            img = cv2.copyMakeBorder(img,
                                     int(umin_p),
                                     int(umax_p),
                                     int(vmin_p),
                                     int(vmax_p),
                                     cv2.BORDER_CONSTANT,
                                     value=[255, 255, 255])

            cx = arg.cx - y + scale // 2
            cy = arg.cy - x + scale // 2

            cx = (cx * 256) / scale
            cy = (cy * 256) / scale
            fx = (arg.fx * 256) / scale
            fy = (arg.fy * 256) / scale

            intr = torch.from_numpy(
                np.array([
                    [fx, 0.0, cx],
                    [0.0, fy, cy],
                    [0.0, 0.0, 1.0],
                ],
                         dtype=np.float32)).unsqueeze(0).to(device)

            _intr = intr.cpu().numpy()
            camparam = np.zeros((1, 21, 4))
            camparam[:, :, 0] = _intr[:, 0, 0]
            camparam[:, :, 1] = _intr[:, 1, 1]
            camparam[:, :, 2] = _intr[:, 0, 2]
            camparam[:, :, 3] = _intr[:, 1, 2]

            img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_LINEAR)

            img = functional.to_tensor(img).float()
            img = functional.normalize(img, [0.5, 0.5, 0.5], [1, 1, 1])
            img = img.unsqueeze(0).to(device)

            hm, so3, beta, joint_root, bone = model(img, intr)
            kp2d = hm_to_kp2d(hm.detach().cpu().numpy()) * 4
            so3 = so3[0].detach().cpu().float().numpy()
            beta = beta[0].detach().cpu().float().numpy()
            bone = bone[0].detach().cpu().numpy()
            joint_root = joint_root[0].detach().cpu().numpy()
            so3 = npj.array(so3)
            beta = npj.array(beta)
            bone = npj.array(bone)
            joint_root = npj.array(joint_root)
            kp2d = npj.array(kp2d)
            so3_init = so3
            beta_init = beta
            joint_root = reinit_root(joint_root, kp2d, camparam)
            joint = mano_de_j(so3, beta)
            bone = reinit_scale(joint, kp2d, camparam, bone, joint_root)
            params = {'so3': so3, 'beta': beta, 'bone': bone}
            opt_state = opt_init(params)
            n = 0
            while n < 20:
                n = n + 1
                params = get_params(opt_state)
                grads = gr(params, so3_init, beta_init, joint_root, kp2d,
                           camparam)
                opt_state = opt_update(n, grads, opt_state)
            params = get_params(opt_state)
            v = mano_de(params, joint_root, bone)

            kp2d = np.array(kp2d[0])
            x = x + ((kp2d[9, 1] - 128) * scale) / 256
            y = y + ((kp2d[9, 0] - 128) * scale) / 256
            scale = max(
                max(kp2d[:, 0].max() - kp2d[:, 0].min(),
                    kp2d[:, 1].max() - kp2d[:, 1].min()) * 2, 80)

            x_reg[:9] = x_reg[1:]
            x_reg[-1] = x
            y_reg[:9] = y_reg[1:]
            y_reg[-1] = y
            s_reg[:9] = s_reg[1:]
            s_reg[-1] = scale

            x = (x_reg * weight).sum()
            y = (y_reg * weight).sum()
            scale = (s_reg * weight).sum()

            frame = renderer(v, o_intr[0], frame)
            frame = cv2.rectangle(frame, (int(vmin), int(umin)),
                                  (int(vmax), int(umax)), (255, 255, 255),
                                  thickness=5)
            display.blit(
                pygame.surfarray.make_surface(
                    np.transpose(np.flip(frame, 1), (1, 0, 2))), (0, 0))
            pygame.display.update()
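pygame.surfarray.make_surface expects an array indexed (width, height, channels) while camera frames are (height, width, channels), hence the transpose; the np.flip(frame, 1) mirrors the image horizontally for a selfie-style preview. The conversion factored out as a sketch:

import numpy as np
import pygame

def frame_to_surface(frame):
    # Mirror along the width axis, then swap (H, W, C) -> (W, H, C)
    # as pygame's surfarray convention requires.
    mirrored = np.flip(frame, 1)
    return pygame.surfarray.make_surface(np.transpose(mirrored, (1, 0, 2)))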
masked_image_path = '/media/pkao/Dataset/COVIDDataset/MaskedOthersSlices/'
patients = os.listdir(image_path)
for i in range(len(patients)):
    patient = load_scan(image_path + patients[i])
    patient_pixels = get_pixels_hu(patient)
    patient_pixels = normalize(patient_pixels)
    print(patient_pixels.shape)
    seg_patient = nib.load(segmented_image_path + patients[i]+"_mask.nii")
    seg_patient_pixels = seg_patient.get_fdata()
    print(seg_patient_pixels.shape)
    # Reorder axes (slice, row, col) -> (col, row, slice); equivalent to the
    # original element-wise triple loop, which implicitly assumed square slices.
    patient_pixels_trans = np.transpose(patient_pixels, (2, 1, 0))
    print(patient_pixels_trans.shape)
    masked_image = np.where(seg_patient_pixels!=0, patient_pixels_trans, 0)
    print(masked_image.shape)
    for j in range(masked_image.shape[2]):
        if(np.any(masked_image[:,:,j])):
#             im = Image.fromarray(masked_image[:,:,j])
#             im.save(str(masked_image_path)+ patients[i]+'_slice_'+str(j)+'.nii')
            img = nib.Nifti1Image(np.flip(masked_image[:,:,j]), np.eye(4))
            nib.save(img, os.path.join(masked_image_path, patients[i]+'_slice_'+str(j)+'.nii'))
            if(patients[i] in x_train):
                list_train.append(str(patients[i]+'_slice_'+str(j)+'.nii'))
            elif(patients[i] in x_valid):   
                list_val.append(str(patients[i]+'_slice_'+str(j)+'.nii') )               
            else:
                list_test.append(str(patients[i]+'_slice_'+str(j)+'.nii'))
Example No. 56
        latlim = [-10+tclat1, 10+tclat1]
        lonlim = [-10+tclon1, 10+tclon1]
    print(htt.time2str(t, 'yyyymmdd_HHMMSS'))
    gc.collect()
    if os.path.exists(figname):
        t = t+15*60
        continue

    try:
        lon2, lat2, tb2, tb3, tb4, lat_fy4a, lon_fy4a, ccc = \
            FY4A_FIG.get_tb3(t, lonlim, latlim, addlight=True)

        fig = plt.figure(figsize=(8, 6), dpi=100)
        ax = fig.add_subplot(1, 1, 1)
        f = ax.imshow(
            np.flip(tb4, 0), cmap=ccc,
            extent=(lonlim[0], lonlim[1], latlim[0], latlim[1]))
        plt.title(htt.time2str(t, 'yyyy/mm/dd HH:MM:SS'))
        ax.plot(tclon, tclat, 'r-+', linewidth=0.5)
        ax.plot(tclon1, tclat1, 'g+', linewidth=0.5, markersize=20)
        ax.set_xlim(lonlim)
        ax.set_ylim(latlim)
        ax.plot(cll[:, 0], cll[:, 1], 'k-', linewidth=0.5)

        ax, _ = mpl.colorbar.make_axes(plt.gca(), shrink=0.5)
        cbar = mpl.colorbar.ColorbarBase(
            ax, cmap=ccc, norm=mpl.colors.Normalize(vmin=-110, vmax=50))
        fig.savefig(figname, dpi=200)
        plt.close(fig)
    except:
        pass
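The np.flip(tb4, 0) before imshow compensates for imshow's default upper-left origin when row 0 of the data is the southernmost latitude; passing origin='lower' is an equivalent, copy-free alternative:

import numpy as np
import matplotlib.pyplot as plt

tb = np.arange(12.0).reshape(3, 4)     # stand-in for tb4; row 0 = lowest latitude
extent = (100.0, 110.0, 10.0, 20.0)    # illustrative lon/lat limits

fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(np.flip(tb, 0), extent=extent)        # as in the snippet above
ax2.imshow(tb, origin='lower', extent=extent)    # same picture, no flip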
Example No. 57
            state_fw = zeros((res_size + inSize, 1))
            state_bw = zeros((res_size + inSize, 1))
        for t in range(trainLen):
            u_fw = reshape(asarray(Xtr[t]), (inSize, 1))
            inputs.append(u_fw)
            if use_reservoirs == "True":
                u_bw = reshape(asarray(Xtr[trainLen - 1 - t]), (inSize, 1))
                x_fw = (1 - a) * x_fw + a * tanh(
                    dot(diag(G_fw) * Win_fw, u_fw) +
                    dot(diag(G_fw) * W_fw, x_fw) + B_fw)
                x_bw = (1 - a) * x_bw + a * tanh(
                    dot(diag(G_bw) * Win_bw, u_bw) +
                    dot(diag(G_bw) * W_bw, x_bw) + B_bw)
                fw_states.append(x_fw)
                bw_states.append(x_bw)
        intermediate_data.append((inputs, fw_states, flip(bw_states, 0), Ytr))

    print "...done."

    # f = open(os.path.join(save_path, 'trained_ESN3_' + name_add + '.cpickle'), "wb")
    # cPickle.dump(intermediate_data, f, protocol=2)
    # f.close()

    print "Training..."
    all_train_error = []
    for iter in range(training_iterations):
        for i, text in enumerate(intermediate_data):
            print('training on text ' + str(i + 1) + '/' +
                  str(len(train_data)) + '...')
            trainLen = len(text[0])
            curr_train_error = zeros((trainLen))
Example No. 58
    def process(self, out=None):

        data = self.get_input()
        geometry = data.geometry

        angles_deg = geometry.config.angles.angle_data.copy()

        if geometry.config.angles.angle_unit == "radian":
            angles_deg *= 180 / np.pi

        #keep angles in range -180 to 180
        while angles_deg.min() < -180:
            angles_deg[angles_deg < -180] += 360

        while angles_deg.max() >= 180:
            angles_deg[angles_deg >= 180] -= 360

        target = angles_deg[self.projection_index] + 180

        if target < -180:
            target += 360
        elif target >= 180:
            target -= 360

        ind = np.abs(angles_deg - target).argmin()

        if abs(angles_deg[ind] - angles_deg[0]) - 180 > self.ang_tol:
            raise ValueError(
                'Method requires projections at 180 degrees interval')

        #cross correlate single slice with the 180deg one reversed
        data_slice = data.subset(vertical=self.slice_index)

        data1 = data_slice.subset(angle=0).as_array()
        data2 = np.flip(data_slice.subset(angle=ind).as_array())

        border = int(data1.size * 0.05)
        lag = np.correlate(data1[border:-border], data2[border:-border],
                           "full")

        ind = lag.argmax()

        #fit quadratic to 3 centre points
        a = (lag[ind + 1] + lag[ind - 1] - 2 * lag[ind]) * 0.5
        b = a + lag[ind] - lag[ind - 1]
        quad_max = -b / (2 * a) + ind

        shift = (quad_max - (lag.size - 1) / 2) / 2
        shift = np.floor(shift * 100 + 0.5) / 100

        new_geometry = data.geometry.copy()

        #set up new geometry
        new_geometry.config.system.rotation_axis.position[
            0] = shift * geometry.config.panel.pixel_size[0]

        print("Centre of rotation correction using cross-correlation")
        print("\tCalculated from slice: ", self.slice_index)
        print("\tApplied centre of rotation shift = ", shift,
              "pixels at the detector.")

        if out is None:
            return AcquisitionData(
                array=data,
                deep_copy=True,
                dimension_labels=new_geometry.dimension_labels,
                geometry=new_geometry,
                supress_warning=True)
        else:
            out.geometry = new_geometry
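The three lines after lag.argmax() fit a parabola through the peak sample and its two neighbours and take the vertex, recovering the correlation maximum at sub-pixel resolution. A standalone check of the formula:

import numpy as np

# Sample a known parabola with vertex at x = 10.3 on integer lags.
true_peak = 10.3
x = np.arange(21)
lag = -(x - true_peak) ** 2

ind = lag.argmax()                                   # nearest integer lag
a = (lag[ind + 1] + lag[ind - 1] - 2 * lag[ind]) * 0.5
b = a + lag[ind] - lag[ind - 1]
quad_max = -b / (2 * a) + ind                        # parabola vertex

np.testing.assert_allclose(quad_max, true_peak)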
Example No. 59
def _reverse(tensor, axis, name=None):  # pylint: disable=unused-argument
  if np.array(axis).ndim == 0:
    return np.flip(tensor, axis)
  for ax in axis:
    tensor = np.flip(tensor, ax)
  return tensor
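This shim predates tuple support in np.flip; since NumPy 1.15, axis may be a tuple (or None to flip every axis), so on current NumPy the loop is equivalent to a single call:

import numpy as np

t = np.arange(24).reshape(2, 3, 4)

np.testing.assert_array_equal(_reverse(t, (0, 2)), np.flip(t, (0, 2)))
np.testing.assert_array_equal(_reverse(t, 1), np.flip(t, 1))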
Example No. 60
File: obs.py Project: mr91i/envos
    def flip_ax(self, ax, dax, num):
        if (len(ax) >= 2) and (ax[1] < ax[0]):
            ax = ax[::-1]
            dax = ax[1] - ax[0]
            # np.flip returns a new view; assign it back so the data cube is
            # actually reordered along this axis
            self.Ippv = np.flip(self.Ippv, num)
        return ax, dax