def get_exmaple_regions(Page_Dist, Our_Docs, F, Per_Small, Per_Large0):
	"""Sample a random transition pair from F and print/return example docs.

	Picks one row of F at random, takes its two transition-matrix indices
	(tA, tB) and collects documents close to one index
	(distance < Per_Small) while far from the other (distance > Per_Large0).
	The `Cut` closest documents on each side are labelled region 'A'
	(near tA) and 'B' (near tB), joined with metadata from Our_Docs,
	printed, and returned as a DataFrame.
	"""
	Cut = 3  # keep at most this many examples per region

	# randint samples a half-open interval, so every index is valid; the
	# original round(uniform(0, len(F))) could yield len(F) -> IndexError.
	ind0 = np.random.randint(0, len(F))
	print(ind0)
	tA = F.tran_mat_index1.tolist()[ind0]
	tB = F.tran_mat_index2.tolist()[ind0]

	indexes = np.indices(Page_Dist.shape)

	def region_frame(near, far, label, sort_col):
		# documents close to `near` but far from `far`
		tf = (Page_Dist[near, :] < Per_Small) & (Page_Dist[far, :] > Per_Large0)
		ind = indexes[1, 0, tf]
		frame = pd.DataFrame({'tran_mat_index': ind})
		frame['Region'] = label
		frame['dist_to_A'] = Page_Dist[tA, ind]
		frame['dist_to_B'] = Page_Dist[tB, ind]
		# sort_values replaces the long-removed DataFrame.sort_index(by=...)
		frame = frame.sort_values(by=sort_col)
		return tf, frame.iloc[:Cut]

	TFA, indApd = region_frame(tA, tB, 'A', 'dist_to_A')
	TFB, indBpd = region_frame(tB, tA, 'B', 'dist_to_B')

	Info = pd.concat([indApd, indBpd])
	Info = join_replace(Info, Our_Docs[['usid_index', 'tran_mat_index', 'year', 'parties']],
						'tran_mat_index')
	if min(np.sum(TFA), np.sum(TFB)) < Cut:
		print("Too small")
	print(Info.to_string())
	return Info
Example #2
0
	def __init__(self, Nm, Nf, L, qpf = 3):
		"""Initialise mass/force grids and their Fourier wavenumbers.

		Args:
			Nm: number of mass-grid cells per dimension.
			Nf: number of force-grid cells per dimension.
			L: physical box size; cell sizes are L/Nm and L/Nf.
			qpf: subdivision level per mass cell; each cell is split into
				2**(2*qpf) sub-points.  Assumes the module-level table
				`subdiv_unitcell` is indexed by this level -- TODO confirm.
		"""
		self.Nm = Nm
		self.Nf = Nf
		self.L = L
		self.qpf = qpf

		# physical size of one cell on each grid
		self.mass_res = L / Nm
		self.force_res = L / Nf

		# integer index grid of shape (2, Nm, Nm) and its physical coordinates
		self.X = np.indices((Nm, Nm)).astype(float)
		self.MX = self.X * self.mass_res

		# subdivide mass elements
		# reshape to (Nm**2, 2) cell corners, offset each by the unit-cell
		# subdivision offsets -> Nm**2 * 2**(2*qpf) sample points
		self.XX = (self.X.transpose([1,2,0]).reshape([Nm**2,2])[:,np.newaxis,:] + \
				subdiv_unitcell[self.qpf]).reshape([Nm**2 * 2**(2*self.qpf), 2])

		self.FX = np.indices((Nf, Nf)).astype(float) * self.force_res

		# k-values, divide by resolution to get physical scales
		self.Km = self.make_K(Nm)
		self.km2 = (self.Km**2).sum(axis=0)
		# guard against division by zero at the k=0 mode
		self.km2[0, 0] = 1

		self.Kf = self.make_K(Nf)
		self.kf2 = (self.Kf**2).sum(axis=0)
		self.kf2[0, 0] = 1
Example #3
0
def shear_map(e1, e2, nx):
    """Draw a whisker (quiver) plot of the shear field (e1, e2).

    The shear components are converted to magnitude and half position
    angle and drawn with matplotlib's quiver (headless arrows) at pixel
    centres of an nx x nx grid.  Returns the Quiver instance.
    """
    # pixel-centre coordinates
    x = numpy.indices([nx, nx])[0].astype(numpy.double) + 0.5
    y = numpy.indices([nx, nx])[1].astype(numpy.double) + 0.5

    etot = numpy.sqrt(e1**2 + e2**2)

    # half the position angle, signed by e2; vectorised replacement for
    # the original per-pixel xrange loop (identical values, incl. the
    # nan produced when e2 == 0 but etot > 0)
    phi = numpy.zeros((nx, nx), dtype=numpy.double)
    nonzero = etot > 0
    phi[nonzero] = (numpy.arccos(e1[nonzero] / etot[nonzero])
                    * e2[nonzero] / numpy.abs(e2[nonzero])) / 2.

    fct = 5  # arbitrary whisker length scale
    u = fct * etot * numpy.cos(phi)
    v = fct * etot * numpy.sin(phi)

    # headless arrows render the spin-2 shear as whiskers
    Q = quiver(x, y, u, v, pivot='middle', units='width', headlength=0,
               headwidth=0, color='k')
    return Q
Example #4
0
def binary_mask_multiple(coords_rel, shape, radius, include_edge=True,
                         return_masks=False):
    """Creates multiple elliptical masks.

    Parameters
    ----------
    coords_rel : ndarray (N x 2 or N x 3)
        coordinates
    shape : tuple
        shape of the image
    radius : number or tuple of number
        size of the masks
    include_edge : bool
        whether points at exactly one radius count as inside
    return_masks : bool
        when True, also return per-coordinate masks restricted to the
        pixels of the combined mask

    Returns
    -------
    mask_total : bool ndarray of `shape`
        union of all elliptical masks
    masks_single : bool ndarray (N x mask_total.sum()), only if return_masks
    """
    ndim = len(shape)
    radius = validate_tuple(radius, ndim)
    coords_rel = np.atleast_2d(coords_rel)

    # normalised squared distance to each coordinate; <= keeps the
    # ellipse boundary, < drops it
    compare = np.less_equal if include_edge else np.less
    dist = [compare(np.sum(((np.indices(shape).T - coord) / radius)**2, -1), 1)
            for coord in coords_rel]
    mask_total = np.any(dist, axis=0).T
    if not return_masks:
        return mask_total
    # np.bool was removed from NumPy; the builtin bool dtype is equivalent.
    # Only allocated when actually requested.
    masks_single = np.empty((len(coords_rel), mask_total.sum()), dtype=bool)
    for i, _dist in enumerate(dist):
        masks_single[i] = _dist.T[mask_total]
    return mask_total, masks_single
Example #5
0
def prepare_subimage(coords, image, radius, noise_size=None, threshold=None):
    """Slice, filter and mask the image region around a cluster of features.

    Parameters
    ----------
    coords : ndarray (N x ndim)
        feature coordinates in image coordinates
    image : ndarray
        the full image
    radius : number or tuple of number
        mask radius per dimension
    noise_size, threshold :
        when noise_size is given, a lowpass filter is applied first
        (threshold defaults to 0)

    Returns
    -------
    tuple
        (masked pixel values as float64, coordinate mesh of those pixels
        in image coordinates, per-feature boolean masks)

    Raises
    ------
    RefineException
        when the coordinates fall outside the image bounds.
    """
    ndim = image.ndim
    radius = validate_tuple(radius, ndim)
    # slice region around cluster
    im, origin = slice_image(coords, image, radius)
    if origin is None:   # coordinates are out of image bounds
        raise RefineException

    # do lowpass filter
    if noise_size is not None:
        if threshold is None:
            threshold = 0
        im = lowpass(im, noise_size, threshold)

    # include the edges where dist == 1 exactly
    dist = [(np.sum(((np.indices(im.shape).T - (coord - origin)) / radius)**2, -1) <= 1)
            for coord in coords]

    # to mask the image
    mask_total = np.any(dist, axis=0).T
    # to mask the masked image
    # np.bool was removed from NumPy; the builtin bool dtype is equivalent
    masks_singles = np.empty((len(coords), mask_total.sum()), dtype=bool)
    for i, _dist in enumerate(dist):
        masks_singles[i] = _dist.T[mask_total]

    # create the coordinates
    mesh = np.indices(im.shape, dtype=np.float64)[:, mask_total]
    # translate so that coordinates are in image coordinates
    mesh += np.array(origin)[:, np.newaxis]

    return im[mask_total].astype(np.float64), mesh, masks_singles
Example #6
0
	def merge_transient(self, other_map):
		"""
		Like merge, but only makes a transient change

		Adds `occupancy_weight` to every cell of our grid covered by an
		occupied cell of `other_map`, after first subtracting the weight
		contributed by the previous transient update (remembered in
		`self.last_map_update`).  Both maps must share resolution, frame
		and orientation.

		NOTE(review): occupancy_weight is a module-level constant defined
		elsewhere in this file.
		"""
		assert isinstance(other_map, Map)
		assert self.resolution == other_map.resolution
		assert self.frame == other_map.frame
		assert self.orient == other_map.orient

		# Add the new map update to our map.

		# apply the local map
		mask = other_map.grid > 0
		# translate other_map's occupied cell indices into our grid frame
		i0, j0 = self.index_at(other_map.pos_at(0,0))
		i, j = np.indices(other_map.grid.shape)[:,mask]
		self.grid[i + i0,j + j0] += occupancy_weight # All occupied cells are now only slightly occupied

		# Check to see if we should make a laserscan diff
		if self.last_map_update:
			# Subtract the old laserscan data
			mask = self.last_map_update.grid > 0
			i0, j0 = self.index_at(self.last_map_update.pos_at(0,0))
			i, j = np.indices(self.last_map_update.grid.shape)[:,mask]
			self.grid[i + i0,j + j0] -= occupancy_weight # All occupied cells are now only slightly occupied

		# Save the old laserscan data
		self.last_map_update = other_map
Example #7
0
    def coordinates(self, coord_type='skycoord', origin=0, mode='center'):
        """
        Sky coordinate images.

        Parameters
        ----------
        coord_type : {'pix', 'skycoord', 'galactic'}
            Which type of coordinates to return.
        origin : {0, 1}
            Pixel coordinate origin.
        mode : {'center', 'edges'}
            Return coordinate values at the pixels edges or pixel centers.

        Returns
        -------
        (x, y) pixel grids for 'pix', a SkyCoord image for 'skycoord',
        or (l, b) galactic longitude/latitude for 'galactic'.
        """
        if mode == 'center':
            y, x = np.indices(self.data.shape)
        elif mode == 'edges':
            # one extra row/column, shifted half a pixel to the edges
            shape = self.data.shape[0] + 1, self.data.shape[1] + 1
            y, x = np.indices(shape)
            y, x = y - 0.5, x - 0.5
        else:
            raise ValueError('Invalid mode to compute coordinates.')

        if coord_type == 'pix':
            return x, y
        else:
            coordinates = pixel_to_skycoord(x, y, self.wcs, origin)
            if coord_type == 'skycoord':
                return coordinates
            elif coord_type == 'galactic':
                l = coordinates.galactic.l.wrap_at('180d')
                b = coordinates.galactic.b
                return l, b
            else:
                # message previously omitted 'galactic' even though it is a
                # supported value (see docstring)
                raise ValueError("Not a valid coordinate type. Choose either"
                                 " 'pix', 'skycoord' or 'galactic'.")
Example #8
0
def simpleCentroid(img, threshold_frac=0, **kwargs):
    '''
    Centroids an image, or an array of images.
    Centroids over the last 2 dimensions.
    Sets all values under "threshold_frac*max_value" to zero before centroiding

    Parameters:
        img (ndarray): 2d image or (n, y, x) stack of images
        threshold_frac (float): fraction of the max value below which
            pixels are zeroed before centroiding

    Returns:
        ndarray: [y_centroid, x_centroid] (scalars for 2d input, length-n
        arrays for a stack), offset by +0.5 to pixel-centre convention
    '''
    if threshold_frac != 0:
        if img.ndim == 2:
            img = numpy.where(img > threshold_frac*img.max(), img, 0)
        else:
            # work on a copy: the previous implementation zeroed pixels
            # in the caller's array in place (unlike the 2d branch)
            img = img.copy()
            img_temp = (img.T - threshold_frac*img.max(-1).max(-1)).T
            img[img_temp < 0] = 0

    if img.ndim == 2:
        y_cent, x_cent = numpy.indices(img.shape)
        y_centroid = (y_cent*img).sum() / img.sum()
        x_centroid = (x_cent*img).sum() / img.sum()
    else:
        # per-image centroids over the trailing two axes
        y_cent, x_cent = numpy.indices((img.shape[-2], img.shape[-1]))
        y_centroid = (y_cent*img).sum(-1).sum(-1) / img.sum(-1).sum(-1)
        x_centroid = (x_cent*img).sum(-1).sum(-1) / img.sum(-1).sum(-1)

    # shift from index convention to pixel-centre convention
    y_centroid += 0.5
    x_centroid += 0.5

    return numpy.array([y_centroid, x_centroid])
Example #9
0
def generate_result_maps(data, sourcelist):
    """Return a source and residual image

    Given a data array (image) and list of sources, return two images, one
    showing the sources themselves and the other the residual after the
    sources have been removed from the input data.
    """
    residual_map = numpy.array(data)  # array constructor copies by default
    gaussian_map = numpy.zeros(residual_map.shape)
    # The index grids are identical for every source: build them once
    # instead of calling numpy.indices twice per source inside the loop.
    grid_x, grid_y = numpy.indices(residual_map.shape)
    for src in sourcelist:
        # Include everything with 6 times the std deviation along the major
        # axis. Should be very very close to 100% of the flux.
        box_size = 6 * src.smaj.value / math.sqrt(2 * math.log(2))

        # clamp the evaluation box to the image bounds
        lower_bound_x = max(0, int(src.x.value - 1 - box_size))
        upper_bound_x = min(residual_map.shape[0], int(src.x.value - 1 + box_size))
        lower_bound_y = max(0, int(src.y.value - 1 - box_size))
        upper_bound_y = min(residual_map.shape[1], int(src.y.value - 1 + box_size))

        # evaluate the fitted Gaussian on the clamped pixel grid
        local_gaussian = gaussian(
            src.peak.value,
            src.x.value,
            src.y.value,
            src.smaj.value,
            src.smin.value,
            src.theta.value
        )(
            grid_x[lower_bound_x:upper_bound_x, lower_bound_y:upper_bound_y],
            grid_y[lower_bound_x:upper_bound_x, lower_bound_y:upper_bound_y]
        )

        gaussian_map[lower_bound_x:upper_bound_x, lower_bound_y:upper_bound_y] += local_gaussian
        residual_map[lower_bound_x:upper_bound_x, lower_bound_y:upper_bound_y] -= local_gaussian

    return gaussian_map, residual_map
Example #10
0
def test_psi_continuous():
    """psi of a relaxed LP prediction must reproduce the true label counts.

    Builds a multinomial-blocks problem, sets pairwise potentials that make
    the prediction perfect (linear ordering horizontally, strong equality
    vertically), runs relaxed LP inference, and checks the unary part of
    psi against per-label feature sums of the ground truth.
    """
    # first make perfect prediction, including pairwise part
    X, Y = toy.generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
    x, y = X[0], Y[0]
    n_states = x.shape[-1]

    pw_horz = -1 * np.eye(n_states)
    xx, yy = np.indices(pw_horz.shape)
    # linear ordering constraint horizontally
    pw_horz[xx > yy] = 1

    # high cost for unequal labels vertically
    pw_vert = -1 * np.eye(n_states)
    pw_vert[xx != yy] = 1
    pw_vert *= 10

    # create crf, assemble weight, make prediction
    crf = DirectionalGridCRF(n_states=3, inference_method='lp')
    w = np.hstack([np.ones(3), -pw_horz.ravel(), -pw_vert.ravel()])
    y_pred = crf.inference(x, w, relaxed=True)

    # compute psi for prediction
    psi_y = crf.psi(x, y_pred)
    assert_equal(psi_y.shape, (crf.size_psi,))
    # first unary, then horizontal, then vertical
    unary_psi = crf.get_unary_weights(psi_y)
    # NOTE(review): the pairwise parts are extracted but never asserted
    pw_psi_horz, pw_psi_vert = crf.get_pairwise_weights(psi_y)

    # test unary
    xx, yy = np.indices(y.shape)
    assert_array_almost_equal(unary_psi,
                              np.bincount(y.ravel(), x[xx, yy, y].ravel()))
Example #11
0
def centreOfGravity(img, threshold=0, **kwargs):
    '''
    Centroids an image, or an array of images.
    Centroids over the last 2 dimensions.
    Sets all values under "threshold*max_value" to zero before centroiding
    Origin at 0,0 index of img.

    Parameters:
        img (ndarray): ([n, ]y, x) 2d or greater rank array of imgs to centroid
        threshold (float): Percentage of max value under which pixels set to 0

    Returns:
        ndarray: Array of centroid values (2[, n])

    '''
    if threshold != 0:
        if img.ndim == 2:
            img = numpy.where(img > threshold*img.max(), img, 0)
        else:
            # work on a copy: the previous implementation zeroed pixels
            # in the caller's array in place (unlike the 2d branch)
            img = img.copy()
            img_temp = (img.T - threshold*img.max(-1).max(-1)).T
            img[img_temp < 0] = 0

    if img.ndim == 2:
        y_cent, x_cent = numpy.indices(img.shape)
        y_centroid = (y_cent*img).sum() / img.sum()
        x_centroid = (x_cent*img).sum() / img.sum()
    else:
        # per-image centroids over the trailing two axes
        y_cent, x_cent = numpy.indices((img.shape[-2], img.shape[-1]))
        y_centroid = (y_cent*img).sum(-1).sum(-1) / img.sum(-1).sum(-1)
        x_centroid = (x_cent*img).sum(-1).sum(-1) / img.sum(-1).sum(-1)

    # note the (x, y) return order, unlike simpleCentroid's (y, x)
    return numpy.array([x_centroid, y_centroid])
 def prep_image(self):
   """Takes the solved coordinate system and makes a piecewise \
   transform on the origin image to the target image"""
   # NOTE(review): Python 2 code (print statement below).
   transform = ProjectiveTransform()
   # start from the best (minimum-error) mesh found by the solver
   self.coord_solver.coordinates = self.coord_solver.min_coords.copy()
   self.new_image = np.zeros(self.coord_solver.image.shape)
   # 2x2 patches of the solved mesh, each flattened to its 4 corner points
   coords = np.array([self.coord_solver.coordinates[x:x+2, y:y+2, :].reshape([4, 2]) for x in \
     range(self.coord_solver.coordinates.shape[0]) for y in range(self.coord_solver.coordinates.shape[1]) \
     if (self.coord_solver.coordinates[x:x+2, y:y+2, :].shape == (2, 2, 2))])
   # the corresponding patches on the undistorted (canonical) grid
   canonical_coords = np.indices((self.coord_solver.width, self.coord_solver.height)).T.astype('float32')
   flattened_canonical = np.array([canonical_coords[x:x+2, y:y+2, :].reshape([4, 2]) for x in \
     range(canonical_coords.shape[0]-1) for y in range(canonical_coords.shape[1]-1)])
   mesh_size = self.coord_solver.mesh_factor
   print "needs %s calcs" % coords.shape[0]
   # every pixel coordinate of the source image as an (N, 2) float array
   coord_grid = np.indices(self.coord_solver.image.shape[:-1]).T.astype('float32').reshape(-1,2)
   for k in range(coords.shape[0]):
     # destination quad (solved mesh) and its canonical counterpart, in pixels
     des = mesh_size*coords[k, :, :]
     canon_coord = mesh_size*flattened_canonical[k, :, :]
     # NOTE(review): src is computed but never used
     src = mesh_size*flattened_canonical[0, :, :]
     if not transform.estimate(des, canon_coord):
       raise Exception("estimate failed at %s" % str(k))
     # NOTE(review): these two are also unused after assignment
     area_in_question_x = canon_coord[0, 0].astype(int)
     area_in_question_y = canon_coord[0, 1].astype(int)
     # warp the whole image with the per-patch projective transform ...
     scaled_area = tf.warp(self.coord_solver.image, transform)
     # ... then keep only the pixels falling inside the destination quad
     area_path = path.Path([des[0],des[1],des[3],des[2],des[0]])
     points_in_area = area_path.contains_points(coord_grid,radius=0.00001).reshape(self.coord_solver.image.shape[:-1])
     self.new_image += scaled_area*points_in_area[:,:,np.newaxis]
Example #13
0
def find_mode(ndarray, axis=0):
    """Return the modal (most frequent) value of `ndarray` along `axis`.

    Sorts along the axis and finds the longest run of equal values; on
    ties the first such value in sorted order wins.

    Raises:
        Exception: if the array is empty or `axis` is out of range.
    """
    if ndarray.size == 0:
        raise Exception('Attempted to find mode on an empty array!')
    if ndarray.size == 1:
        # A single element is trivially the mode.  The original code
        # returned a (value, 1) tuple here while every other path returned
        # the bare mode; return just the value for a consistent interface.
        return ndarray.ravel()[0]
    try:
        axis = [i for i in range(ndarray.ndim)][axis]  # normalise negative axes
    except IndexError:
        raise Exception('Axis %i out of range for array with %i dimension(s)' % (axis, ndarray.ndim))

    srt = numpy.sort(ndarray, axis=axis)
    dif = numpy.diff(srt, axis=axis)
    shape = [i for i in dif.shape]
    shape[axis] += 2
    # sentinel-padded index array: zeros mark positions inside a run of
    # equal values, so after sorting, diffs give run lengths
    indices = numpy.indices(shape)[axis]
    index = tuple([slice(None) if i != axis else slice(1, -1) for i in range(dif.ndim)])
    indices[index][dif == 0] = 0
    indices.sort(axis=axis)
    bins = numpy.diff(indices, axis=axis)
    location = numpy.argmax(bins, axis=axis)
    # fancy-index back to the sorted values starting the longest run
    mesh = numpy.indices(bins.shape)
    index = tuple([slice(None) if i != axis else 0 for i in range(dif.ndim)])
    index = [mesh[i][index].ravel() if i != axis else location.ravel() for i in range(bins.ndim)]
    index[axis] = indices[tuple(index)]
    modals = srt[tuple(index)].reshape(location.shape)
    mode = modals[()]  # unwrap 0-d arrays to a scalar
    return mode
Example #14
0
    def vmax_discretized(self, Value, s, xij, i, j):
        """Maximise the Bellman right-hand side over a discrete action set.

        For every candidate action x0 (columns of self.options.X) that is
        feasible at each state node (within the bounds from
        self.bounds(s, i, j)), evaluates the discrete Bellman RHS and keeps
        the best value per state.  The maximising actions are written into
        `xij` in place; the maximal values are returned.

        Args:
            Value: value-function object forwarded to the Bellman RHS.
            s: state nodes; the last axis indexes the ns states.
            xij: output array, overwritten with the optimal action per state.
            i, j: discrete state indices forwarded to bounds / Bellman RHS.

        Returns:
            vij: maximal Bellman values, one per state node.
        """
        nx = self.dims.nx
        ns = s.shape[-1]
        # NOTE(review): dx is read but never used in this method
        dx = self.dims.dx
        X = self.options.X

        # candidate-by-state table of Bellman values; -inf marks infeasible
        vv = np.full((nx, ns), -np.inf)

        xl, xu = self.bounds(s, i, j)
        xl = xl.T
        xu = xu.T

        for h, x0 in enumerate(X.T):
            # states where candidate x0 respects the bounds
            is_= np.all((xl <= x0) & (x0 <= xu), 1)
            if np.any(is_):
                # NOTE(review): xx is built for all ns states but only the
                # is_ columns of vv are assigned -- confirm the RHS helper's
                # return shape matches this masked assignment
                xx = np.repeat(x0, ns, 0)
                vv[h, is_] = self.__Bellman_rhs_discrete(Value, xx, s, i, j)

        xmax = np.argmax(vv, 0)

        # fancy-index vv, picking per state the row of the best candidate
        vxs = [a[0] for a in np.indices(vv.shape)]  # the [0] reduces one dimension
        vxs[0] = xmax
        vij = vv[vxs]

        # same trick to gather the corresponding actions into xij
        xxs = [a[0] for a in np.indices(X.T.shape)]
        xxs[0] = xmax
        xij[:] = X.T[xxs]

        return vij
 def __init__(self, image=None, mesh_factor=14, density_distribution=None):
   """Set up the coordinate mesh, target cell areas and solver weights.

   Args:
     image: source image array of shape (height, width, channels).
     mesh_factor: pixels per mesh cell; the image is cropped to a whole
       number of cells.
     density_distribution: optional per-pixel density array; when absent
       a centred separable Gaussian (scipy.stats.norm) is the target.

   NOTE(review): relies on Python 2 integer division -- under Python 3
   `self.height /= self.mesh_factor` yields a float and the subsequent
   slicing/array sizing would fail.  Confirm before porting.
   """
   super(CoordinateSolver, self).__init__()
   self.image = image
   self.height, self.width, _ = self.image.shape
   self.mesh_factor = mesh_factor
   # switch to mesh units and crop the image to a whole number of cells
   self.height /= self.mesh_factor
   self.width /= self.mesh_factor
   self.image = self.image[:self.mesh_factor*self.height, :self.mesh_factor*self.width]
   if type(density_distribution) == np.ndarray:
     # user-supplied density: crop to the image, drop the last row/column
     restricted_density = density_distribution[:self.mesh_factor*self.height, :self.mesh_factor*self.width]
     target_areas = restricted_density
     target_areas = target_areas[:-1, :-1]
   else:
     # default target: separable Gaussian bump centred on the image
     target_areas = np.indices((self.width-1, self.height-1)).T.astype('float32')
     target_areas = norm.pdf(target_areas[:, :, 0], self.width/2, self.width/5)\
                   *norm.pdf(target_areas[:, :, 1], self.height/2, self.height/5)
   # normalise the density, then rescale so areas sum to the cell count
   target_areas /= sum(sum(target_areas))

   normalisation_factor = (self.height-1)*(self.width-1)
   target_areas_normalised = target_areas*normalisation_factor
   # zero border so neighbour lookups stay in bounds
   self.padded_targets = np.zeros([self.height+1, self.width+1])
   self.padded_targets[1:-1, 1:-1] = target_areas_normalised
   self.coordinates = np.indices((self.width, self.height)).T.astype('float32')
   self.total_error = (self.height-1)*(self.width-1)

   self.min_coords = self.coordinates.copy()
   self.areas = calculate_areas(self.coordinates)
   self.errors = np.zeros(self.padded_targets.shape)
   self.x_weights = np.ones([self.height*self.width, self.height + 1, self.width + 1])
   self.y_weights = np.ones([self.height*self.width, self.height + 1, self.width + 1])
   self.make_weights()
    def get_resources(self, data_dictionary, dataset):
        """Create resources for computing a variable.

        Wraps the entries of `data_dictionary` into a Resources object.
        Known dataset keys ('land_cover', 'gridcell') are converted into
        dataset objects backed by an in-memory dict storage; an id array
        and (for land_cover) relative_x/relative_y grid attributes are
        synthesised when missing.  Other keys are passed through verbatim.

        NOTE(review): Python 2 code -- `dict.keys()[0]` below is not
        subscriptable on Python 3.
        """
        resources=Resources()
        for key in data_dictionary.keys():
            if key in self.datasets:
                data = data_dictionary[key]
                # synthesise a 1..n id array when the id attribute is absent
                if self.id_names[key] not in data_dictionary[key].keys() and not isinstance(self.id_names[key], list):
            
                    data[self.id_names[key]] = arange(1,\
                        len(data_dictionary[key][data_dictionary[key].keys()[0]])+1) # add id array
                
                if key == "land_cover":
                    land_cover_storage = StorageFactory().get_storage('dict_storage')
                    land_cover_table_name = 'land_cover'
                    land_cover_storage.write_table(
                            table_name=land_cover_table_name,
                            table_data=data,
                        )

                    lc = LandCoverDataset(
                        in_storage=land_cover_storage, 
                        in_table_name=land_cover_table_name, 
                        )
                        
                    # add relative_x and relative_y
                    lc.get_id_attribute()
                    # smallest square grid that can hold all cells
                    n = int(ceil(sqrt(lc.size())))
                    
                    if "relative_x" not in data.keys():
                        # 1-based column index of each cell on the n x n grid
                        x = (indices((n,n))+1)[1].ravel()
                        lc.add_attribute(x[0:lc.size()], "relative_x", metadata=1)
                    if "relative_y" not in data.keys():
                        # 1-based row index of each cell on the n x n grid
                        y = (indices((n,n))+1)[0].ravel()
                        lc.add_attribute(y[0:lc.size()], "relative_y", metadata=1)
                        
                    resources.merge({key: lc})
                    
                if key == "gridcell":
                    gridcell_storage = StorageFactory().get_storage('dict_storage')
                    gridcell_table_name = 'gridcell'
                    gridcell_storage.write_table(
                            table_name=gridcell_table_name,
                            table_data=data,
                        )
                    
                    gridcell_dataset = GridcellDataset(
                        in_storage = gridcell_storage,
                        in_table_name = gridcell_table_name,
                        )
                    
                    resources.merge({key: gridcell_dataset})
            else:
                resources.merge({key:data_dictionary[key]})

        # interactions resolve their own datasets; plain datasets are also
        # exposed under the generic 'dataset' key
        if dataset in self.interactions:
            pass
        else:
            resources.merge({"dataset": resources[dataset]})
        resources.merge({"check_variables":'*', "debug":4})
        return resources
Example #17
0
    def coordinates(self, coord_type='world', origin=0, mode='center'):
        """
        Return sky-coordinate images for this map.

        Parameters
        ----------
        coord_type : {'world', 'pix', 'skycoord'}
            Flavour of coordinates to return: raw pixel grids, wrapped
            Longitude/Latitude objects, or plain degree arrays.
        origin : {0, 1}
            Pixel coordinate origin convention passed to the WCS.
        mode : {'center', 'edges'}
            Evaluate at pixel centers, or at pixel edges (one extra
            row/column, shifted by half a pixel).
        """
        if mode == 'edges':
            ny, nx = self.data.shape
            y, x = np.indices((ny + 1, nx + 1))
            y = y - 0.5
            x = x - 0.5
        elif mode == 'center':
            y, x = np.indices(self.data.shape)
        else:
            raise ValueError('Invalid mode to compute coordinates.')

        if coord_type == 'pix':
            return x, y

        # convert through the WCS, wrapping longitude into [-180, 180)
        xsky, ysky = self.wcs.wcs_pix2world(x, y, origin)
        l = Longitude(xsky, unit='deg').wrap_at('180d')
        b = Latitude(ysky, unit='deg')
        if coord_type == 'world':
            return l.degree, b.degree
        if coord_type == 'skycoord':
            return l, b
        raise ValueError("Not a valid coordinate type. Choose either"
                         " 'world', 'pix' or 'skycoord'.")
Example #18
0
	def __init__(self, dim=2, N=256, n=-2.5, t=None, seed=None, scale=1, name="zeldovich approximation"):
		"""Generate a Zel'dovich-approximation particle distribution.

		A Gaussian random field with power-law spectrum k**n (cut at
		k_max=pi) is built in Fourier space; its gradient gives the
		displacement field s, and particles start on a regular grid Q and
		are moved to X = Q + s*t.  Columns added per dimension: position
		(x, y, ...), velocity (vx, ...) and initial position (x0, ...).

		Args:
			dim: number of spatial dimensions (grid is N**dim).
			N: grid points per dimension.
			n: spectral index of the power-law spectrum.
			t: displacement time; defaults to 1.0 only when None.
			seed: optional numpy RNG seed for reproducibility.
			scale: multiplicative factor applied to all stored columns.
			name: dataset name passed to the superclass.
		"""
		super(Zeldovich, self).__init__(name=name)

		if seed is not None:
			np.random.seed(seed)
		shape = (N,) * dim
		# white noise, shaped in Fourier space by the power spectrum
		A = np.random.normal(0.0, 1.0, shape)
		F = np.fft.fftn(A)
		K = np.fft.fftfreq(N, 1./(2*np.pi))[np.indices(shape)]
		k = (K**2).sum(axis=0)
		k_max = np.pi
		F *= np.where(np.sqrt(k) > k_max, 0, np.sqrt(k**n) * np.exp(-k*4.0))
		F.flat[0] = 0  # zero the DC mode
		grf = np.fft.ifftn(F).real
		# regular grid of initial positions in [-0.5, 0.5]
		Q = np.indices(shape) / float(N-1) - 0.5
		s = np.array(np.gradient(grf)) / float(N)
		s /= s.max() * 100.
		# explicit None test: the previous `t = t or 1.` silently replaced
		# a legitimate t=0 with 1
		if t is None:
			t = 1.
		X = Q + s * t

		# loop variable renamed so it no longer shadows the `name` parameter
		for d, axis_name in zip(list(range(dim)), "xyzw"):
			self.add_column(axis_name, X[d].reshape(-1) * scale)
		for d, axis_name in zip(list(range(dim)), "xyzw"):
			self.add_column("v"+axis_name, s[d].reshape(-1) * scale)
		for d, axis_name in zip(list(range(dim)), "xyzw"):
			self.add_column(axis_name+"0", Q[d].reshape(-1) * scale)
Example #19
0
def directionality_filter(filtered, angle=10, combine=True):
    """
    Finds the maximum filter response for each pixel.

    Returns the maximum filter response (normalised by the mean squared
    response) and the angle of maximum response.

    Parameters
    ----------
    filtered : ndarray (H x W x n_filters)
        stack of filter responses; the last axis cycles through angles
        (fastest) for each frequency
    angle : number
        angular step in degrees; n_angles = 180 / angle
    combine : bool
        when True, sum squared responses over frequencies per angle
        before picking the maximum
    """
    f2 = np.power(filtered, 2)

    n_angles = int(180 / angle)
    n_freqs = int(filtered.shape[2] / n_angles)

    if combine:
        # sum responses of all frequencies per angle; np.dstack requires a
        # real sequence (generator support was removed from NumPy)
        f2_combined = np.dstack([f2[:, :, i::n_angles].sum(axis=2)
                                 for i in range(n_angles)])
        max_angle_idx = np.argmax(f2_combined, axis=2)
        x, y = np.indices(max_angle_idx.shape)
        magnitude = f2[x, y, max_angle_idx]

        angles = np.arange(0, 180, angle)
        max_angles = angles[max_angle_idx]
    else:
        # angles repeat once per frequency along the filter axis
        angles = np.hstack([np.arange(0, 180, angle)
                            for f in range(n_freqs)])
        idx = np.argmax(filtered, axis=2)
        x, y = np.indices(idx.shape)
        magnitude = f2[x, y, idx]

        max_angles = angles[idx]

    magnitude = magnitude / np.mean(f2, axis=2)
    return magnitude, max_angles
Example #20
0
    def __init__(self, param):
        """Build synthetic spatial sources and activation time courses.

        param keys read: 'shape', 'gridpoints' (default 9), 'width',
        'latents', 'cov', 'covgroups' (default 4), 'mean', 'var',
        'no_samples', 'act_time', 'noisevar'.
        """
        # create spatial sources
        num_grid = param.get('gridpoints', 9)
        pixel = np.indices(param['shape'])
        # integer spacing between grid points (// preserves the Python 2
        # integer-division behaviour of the original code)
        p_dist = param['shape'][0] // num_grid
        self.points = np.indices((num_grid, num_grid)) * p_dist + p_dist
        # materialise as a list: zip() is lazy on Python 3 and
        # random.shuffle needs a mutable sequence
        self.points = list(zip(self.points[0].flatten(), self.points[1].flatten()))
        random.shuffle(self.points)
        components = [gaussian_influence(mu, param['width'])(pixel[0], pixel[1])
                      for mu in self.points[:param['latents']]]
        self.spt_sources = np.array([i.flatten() for i in components])

        # generate activation timcourses
        covgroups = param.get('covgroups', 4)
        self.cov = group_covmtx(param['cov'], 0.1, covgroups, param['latents'] // covgroups)
        marginal_dist = adjusted_gamma(param['mean'], param['var'])
        self.activ_pre = correlated_samples(self.cov, param['no_samples'],
                                            marginal_dist).T
        self.activ_pre[np.isnan(self.activ_pre)] = 0
        # fold with single stim timecourse
        # NOTE(review): if param['act_time'] is falsy, self.activation is
        # never assigned and the np.dot below raises -- confirm intent
        if param['act_time']:
            self.activation = np.vstack([np.outer(i, param['act_time']).flatten()
                                         for i in self.activ_pre]).T
        self.observed_raw = np.dot(self.activation, self.spt_sources)

        # add noise
        noise = param['noisevar'] * np.random.randn(*self.observed_raw.shape)
        self.observed = self.observed_raw.copy() + noise
Example #21
0
def _get_k(input_array, box_dims):
	'''
	Get the k values for input array with given dimensions.
	Return k components and magnitudes.
	For internal use.

	Supports 1-3 dimensional arrays; box_dims gives the physical box size
	per axis.  Returns ([kx, ...], |k|) with each component centred on the
	middle of the grid.
	'''
	dim = len(input_array.shape)
	if dim == 1:
		x = np.arange(len(input_array))
		center = x.max()/2.
		kx = 2.*np.pi*(x-center)/box_dims[0]
		return [kx], kx
	elif dim == 2:
		x,y = np.indices(input_array.shape, dtype='int32')
		# NOTE(review): with int operands this floors under Python 2 but
		# yields floats under Python 3 -- confirm intended centre
		center = np.array([(x.max()-x.min())/2, (y.max()-y.min())/2])
		kx = 2.*np.pi * (x-center[0])/box_dims[0]
		ky = 2.*np.pi * (y-center[1])/box_dims[1]
		k = np.sqrt(kx**2 + ky**2)
		return [kx, ky], k
	elif dim == 3:
		x,y,z = np.indices(input_array.shape, dtype='int32')
		center = np.array([(x.max()-x.min())/2, (y.max()-y.min())/2, \
						(z.max()-z.min())/2])
		kx = 2.*np.pi * (x-center[0])/box_dims[0]
		ky = 2.*np.pi * (y-center[1])/box_dims[1]
		kz = 2.*np.pi * (z-center[2])/box_dims[2]

		# get_eval() presumably returns an expression evaluator (e.g.
		# numexpr.evaluate) operating on the kx/ky/kz locals -- TODO confirm
		k = get_eval()('(kx**2 + ky**2 + kz**2 )**(1./2.)')
		return [kx,ky,kz], k
Example #22
0
def dilate(cube, mask, objects, cathead, Parameters):
    """Grow each object's mask by dilation until its flux converges.

    For every labelled object in `mask` (labels 1..mask.max()) a
    cylindrical dilation structure of increasing radius is applied;
    dilation stops once the relative flux gain falls below
    dilateThreshold or dilatePixMax is reached, and `mask` is updated in
    place with the chosen dilation.  Returns the updated mask.

    NOTE(review): Python 2 code (print statements below).
    """
    dilateThreshold = Parameters["parameters"]["dilateThreshold"]
    dilatePixMax = Parameters["parameters"]["dilatePixMax"]
    dilateChan = Parameters["parameters"]["dilateChan"]
    # stops dilating when (flux_new-flux_old)/flux_new < dilateThreshold
    for mm in range(1, mask.max() + 1):
        obj = objects[mm - 1]
        # object bounding box, padded by the maximum dilation extent and
        # clamped to the cube (axes are z, y, x)
        xmin = obj[list(cathead).index("x_min")] - dilatePixMax
        xmax = obj[list(cathead).index("x_max")] + dilatePixMax
        ymin = obj[list(cathead).index("y_min")] - dilatePixMax
        ymax = obj[list(cathead).index("y_max")] + dilatePixMax
        zmin = obj[list(cathead).index("z_min")] - dilateChan
        zmax = obj[list(cathead).index("z_max")] + dilateChan
        xmin = max(0, xmin)
        xmax = min(xmax, cube.shape[2] - 1)
        ymin = max(0, ymin)
        ymax = min(ymax, cube.shape[1] - 1)
        zmin = max(0, zmin)
        zmax = min(zmax, cube.shape[0] - 1)
        # work on copies of the sub-cube and sub-mask around the object
        objcube = cube[zmin : zmax + 1, ymin : ymax + 1, xmin : xmax + 1].copy()
        objmask = mask[zmin : zmax + 1, ymin : ymax + 1, xmin : xmax + 1].copy()
        allmask = mask[zmin : zmax + 1, ymin : ymax + 1, xmin : xmax + 1].copy()
        otherobjs = (allmask > 0) * (allmask != mm)
        if (otherobjs).sum():
            # Ensure that objects!=mm within dilatePixMax, dilateChan are not included in the flux growth calculation
            print "WARNING: object %i has possible overlapping objects within %i pix, %i chan" % (
                mm,
                dilatePixMax,
                dilateChan,
            )
            objcube[(allmask > 0) * (allmask != mm)] = 0
        fluxes = []
        # grow the mask radius step by step, tracking the enclosed flux
        for dil in range(dilatePixMax + 1):
            dd = dil * 2 + 1
            # disc of radius dil, replicated over the channel axis
            dilstruct = (np.sqrt(((np.indices((dd, dd)) - dil) ** 2).sum(axis=0)) <= dil).astype(int)
            dilstruct.resize((1, dilstruct.shape[0], dilstruct.shape[1]))
            dilstruct = dilstruct.repeat(dilateChan * 2 + 1, axis=0)
            fluxes.append(objcube[nd.morphology.binary_dilation(objmask == mm, structure=dilstruct)].sum())
            if dil > 0 and (fluxes[-1] - fluxes[-2]) / fluxes[-1] < dilateThreshold:
                break
        # pick the best dilation kernel for current object and update mask
        dil -= 1
        print "Mask dilation of source %i by %i pix and %i chan" % (mm, dil, dilateChan)
        sys.stdout.flush()
        dd = dil * 2 + 1
        dilstruct = (np.sqrt(((np.indices((dd, dd)) - dil) ** 2).sum(axis=0)) <= dil).astype(int)
        dilstruct.resize((1, dilstruct.shape[0], dilstruct.shape[1]))
        dilstruct = dilstruct.repeat(dilateChan * 2 + 1, axis=0)
        # Only grow the mask of object mm even when other objects are present in objmask
        objmask[nd.morphology.binary_dilation(objmask == mm, structure=dilstruct).astype(int) == 1] = mm
        # Put back in objmask objects!=mm that may have been inside objmask before dilation or may have been temporarily replaced by the dilated object mm
        if (otherobjs).sum():
            objmask[otherobjs] = allmask[otherobjs]
        mask[zmin : zmax + 1, ymin : ymax + 1, xmin : xmax + 1] = objmask
        # free the per-object temporaries before the next iteration
        del (objcube)
        del (objmask)
        del (allmask)
        del (otherobjs)
    return mask
Example #23
0
def level_image(im, poly_vert=1, poly_horiz=1, box=None, poly=None, mode="clip"):
    """Subtract a polynomial background from image

    Keyword Arguments:
        poly_vert (int): fit a polynomial in the vertical direction for the image of order
            given. If 0 do not fit or subtract in the vertical direction
        poly_horiz (int): fit a polynomial of order poly_horiz to the image. If 0 given
            do not subtract
        box (array, list or tuple of int): [xmin,xmax,ymin,ymax] define region for fitting. If None use entire
            image
        poly (list or None): [p_horiz, p_vert] arrays of polynomial coefficients
            (highest power first) to subtract: element 0 is applied in the horizontal
            direction, element 1 in the vertical direction. If None function defaults
            to fitting its own polynomial.
        mode (str): Either 'clip' or 'norm' - specifies how to handle intensity values that end up being outside
            of the accepted range for the image.

    Returns:
        A new copy of the processed images.

    Fit and subtract a background to the image. Fits a polynomial of order
    given in the horizontal and vertical directions and subtracts. If box
    is defined then level the *entire* image according to the
    gradient within the box. The subtracted polynomials are added to the
    metadata as the tuple ``(p_horiz, p_vert)`` under the key 'poly_sub'.
    """
    if box is None:
        box = im.max_box
    cim = im.crop_image(box=box)
    (vertl, horizl) = cim.shape
    # Defaults recorded in metadata when the corresponding fit is skipped.
    p_horiz = 0
    p_vert = 0
    if poly_horiz > 0:
        comp_vert = np.average(cim, axis=0)  # average (compress) the vertical values
        if poly is not None:
            p_horiz = poly[0]
        else:
            p_horiz = np.polyfit(np.arange(horizl), comp_vert, poly_horiz)  # fit to the horizontal
            av = np.average(comp_vert)  # get the average pixel height
            p_horiz[-1] = p_horiz[-1] - av  # maintain the average image height
        horizcoord = np.indices(im.shape)[1]  # now apply level to whole image
        for i, c in enumerate(p_horiz):
            im = im - c * horizcoord ** (len(p_horiz) - i - 1)
    if poly_vert > 0:
        comp_horiz = np.average(cim, axis=1)  # average the horizontal values
        if poly is not None:
            p_vert = poly[1]
        else:
            p_vert = np.polyfit(np.arange(vertl), comp_horiz, poly_vert)
            av = np.average(comp_horiz)
            p_vert[-1] = p_vert[-1] - av  # maintain the average image height
        vertcoord = np.indices(im.shape)[0]
        for i, c in enumerate(p_vert):
            im = im - c * vertcoord ** (len(p_vert) - i - 1)
    im.metadata["poly_sub"] = (p_horiz, p_vert)
    if mode == "clip":
        im = im.clip_intensity()  # saturate any pixels outside allowed range
    elif mode == "norm":
        im = im.normalise()
    return im
Example #24
0
    def polar_workspace_init(self, radial_bins=256, angular_bins=256,
                             max_radius=None, centre=None):
        """Resample self.image onto a polar (r, theta) grid into self.pimage.

        Parameters
        ----------
        radial_bins, angular_bins : int
            number of radial / angular bins in the polar image
        max_radius : float or None
            maximum radius to map. Defaults to the radius derived from the
            centre-to-edge half-extents; a larger value raises ValueError.
        centre : (float, float) or None
            (x, y) centre for the polar coordinates; defaults to the image
            centre.

        Raises
        ------
        ValueError
            if max_radius exceeds the radius that fits inside the image

        NOTE(review): the original definition was missing ``self`` even though
        it reads ``self.image`` -- added so the method is actually callable.
        """
        xdim = self.image.shape[0]
        ydim = self.image.shape[1]

        # Default to the geometric centre of the image; the original
        # computed xc/yc but then dereferenced centre[0] unconditionally,
        # crashing when centre is None.
        if centre is None:
            xc = xdim * 0.5
            yc = ydim * 0.5
        else:
            xc = centre[0]
            yc = centre[1]

        # Half-extents from the centre to each edge; these bound the maximum
        # radius available in the polar image.
        xsize = min(xdim + 0.5 - xc, xc)
        ysize = min(ydim + 0.5 - yc, yc)
        max_rad = math.sqrt(xsize ** 2 + ysize ** 2)

        if max_radius is None:
            max_radius = max_rad
        elif max_radius > max_rad:
            raise ValueError

        # Set up interpolation - cubic spline with no smoothing by default.
        # numpy.indices((n,)) returns a (1, n) array; RectBivariateSpline
        # needs strictly 1-D axis coordinates, so use arange instead.
        x = numpy.arange(xdim) + 0.5 - xc
        y = numpy.arange(ydim) + 0.5 - yc
        interp = spint.RectBivariateSpline(x, y, self.image)

        # Polar image bin widths (the original referenced an undefined
        # ``theta_bins``; the angular bin count is ``angular_bins``).
        theta_bin_width = (2.0 * math.pi) / (angular_bins - 1.0)
        radial_bin_width = max_radius / (radial_bins - 1.0)

        # Bin-centre coordinates, evaluated in one vectorized spline call.
        # (The broken per-pixel fallback that clobbered self.pimage has been
        # removed: numpy.empty(radial_bins, angular_bins) and iterating an
        # int were both errors.)
        r, theta = numpy.mgrid[0:radial_bins, 0:angular_bins]
        theta = (theta + 0.5) * theta_bin_width
        r = (r + 0.5) * radial_bin_width
        self.pimage = interp.ev(r * numpy.sin(theta), r * numpy.cos(theta))
Example #25
0
def av2(Z, w1, w2, index = 0):
    """Weighted mean of the coordinate along axis ``index``.

    ``Z`` supplies per-cell weights which are further scaled geometrically by
    ``w1`` per row and ``w2`` per column; moments are summed over the last two
    axes. The arithmetic is done in log space and rescaled before
    exponentiating to avoid underflow.
    """
    grids = np.indices(Z.shape, Z.dtype)
    coord = grids[index]
    row_idx, col_idx = grids[-2:]
    # Combine the weights in log space, then shift so the largest weight
    # lands near the top of the double range before exponentiating.
    log_weight = np.log(Z) + row_idx * np.log(w1) + col_idx * np.log(w2)
    weight = np.exp(log_weight - log_weight.max() + np.log(1e300))
    total = weight.sum(axis=-1).sum(axis=-1)
    first_moment = (weight * coord).sum(axis=-1).sum(axis=-1)
    return first_moment / total
def test_mix_types():
    # Surface is uint16 while the markers are int64 -- mixing these dtypes
    # used to force a crash in mahotas.cwatershed (at least in debug mode).
    surface = np.zeros((64, 64), np.uint16)
    rows, cols = np.indices(surface.shape)
    surface += (cols ** 2).astype(np.uint16)
    surface += ((rows - 23) ** 2).astype(np.uint16)
    seeds = np.zeros((64, 64), np.int64)
    seeds[32, 32] = 1
    regions, lines = mahotas.cwatershed(surface, seeds, return_lines=1)
Example #27
0
def radial_average(input_array, box_dims, bins=10, weights=None):
	'''
	Radially average data.
	Parameters:
		* input_array --- array containing the data to be averaged
		* box_dims  --- tuple with the size of the box in comoving Mpc along each axis
	kwargs:
		* bins = 10 --- the k bins. Can be an integer specifying the number of bins,
		or an array with the bin edges
		* weights = None --- optional array of the same shape as input_array; data are
		multiplied by the weights before binning and each bin is normalised by the
		mean weight in that bin
	Returns:
		Tuple containing the binned data and the bin centres
	'''

	# BUG FIX: "weights != None" is an elementwise comparison when weights
	# is an ndarray and raises on truth-testing; use "is not None".
	# Also multiply into a fresh array instead of "*=" so the caller's
	# input_array is never mutated in place.
	if weights is not None:
		input_array = input_array * weights

	#Make an array containing distances to the center
	dim = len(input_array.shape)
	if dim == 2:
		x,y = np.indices(input_array.shape)
		center = np.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0])
		kx = (x-center[0])/box_dims[0]
		ky = (y-center[1])/box_dims[1]
	elif dim == 3:
		x,y,z = np.indices(input_array.shape)
		center = np.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0, (z.max()-z.min())/2.0])
		kx = (x-center[0])/box_dims[0]
		ky = (y-center[1])/box_dims[1]
		kz = (z-center[2])/box_dims[2]

	else:
		raise Exception('Check your dimensions!')

	#Calculate k values
	if dim == 3:
		k = np.sqrt(kx**2 + ky**2 + kz**2 ) * 2.*np.pi
	else:
		k = np.sqrt(kx**2 + ky**2 ) * 2.*np.pi

	#If bins is an integer, make linearly spaced bins
	if isinstance(bins,int):
		kmin = 2.*np.pi/min(box_dims)
		bins = np.linspace(kmin, k.max(), bins+1)

	#Bin the data
	nbins = len(bins)-1
	dk = (bins[1:]-bins[:-1])/2.
	outdata = np.zeros(nbins)
	for ki in range(nbins):
		kmin = bins[ki]
		kmax = bins[ki+1]
		idx = (k >= kmin) * (k < kmax)
		outdata[ki] = np.mean(input_array[idx])

		# Normalise by the mean weight in this bin (same None check as above).
		if weights is not None:
			outdata[ki] /= weights[idx].mean()

	return outdata, bins[:-1]+dk
Example #28
0
 def level_image(self, poly_vert=1, poly_horiz=1, box=None, poly=None):
     """Subtract a polynomial background from image

     Fit and subtract a background to the image. Fits a polynomial of order
     given in the horizontal and vertical directions and subtracts. If box
     is defined then level the *entire* image according to the
     gradient within the box.

     Parameters
     ----------
     poly_vert: int
         fit a polynomial in the vertical direction for the image of order
         given. If 0 do not fit or subtract in the vertical direction
     poly_horiz: int
         fit a polynomial of order poly_horiz to the image. If 0 given
         do not subtract
     box: array, list or tuple of int
         [xmin,xmax,ymin,ymax] define region for fitting. IF None use entire
         image
     poly: list or None
         [pvert, phoriz] pvert and phoriz are arrays of polynomial coefficients
         (highest power first) to subtract in the horizontal and vertical
         directions. If None function defaults to fitting its own polynomial.

     Returns
     -------
     im: KerrArray
         the levelled image
     """
     if box is None:
         box=self.max_box
     cim=self.crop_image(box=box)
     (vertl,horizl)=cim.shape
     if poly_horiz>0:
         comp_vert = np.average(cim, axis=0) #average (compress) the vertical values
         if poly is not None:
             p=poly[0]
         else:
             p=np.polyfit(np.arange(horizl),comp_vert,poly_horiz) #fit to the horizontal
             av=np.average(comp_vert) #get the average pixel height
             p[-1]=p[-1]-av #maintain the average image height
         horizcoord=np.indices(self.shape)[1] #now apply level to whole image
         for i,c in enumerate(p):
             self=self-c*horizcoord**(len(p)-i-1)
         # NOTE(review): the horizontal fit is stored as 'poly_vert_subtract'
         # (and the vertical fit below as 'poly_horiz_subtract') -- this looks
         # swapped, but the keys are kept unchanged for backward compatibility;
         # confirm against consumers of this metadata.
         self.metadata['poly_vert_subtract']=p
     if poly_vert>0:
         comp_horiz = np.average(cim, axis=1) #average the horizontal values
         if poly is not None:
             p=poly[1]
         else:
             p=np.polyfit(np.arange(vertl),comp_horiz,poly_vert)
             av=np.average(comp_horiz) #BUG FIX: was np.avearage (AttributeError)
             p[-1]=p[-1]-av #maintain the average image height
         vertcoord=np.indices(self.shape)[0]
         for i,c in enumerate(p):
             self=self-c*vertcoord**(len(p)-i-1)
         self.metadata['poly_horiz_subtract']=p
     return self
Example #29
0
def st2(Z, w1, w2, index = 0):
    """Weighted variance of the coordinate along axis ``index``.

    ``Z`` supplies per-cell weights, further scaled geometrically by ``w1``
    per row and ``w2`` per column; moments are summed over the last two axes.
    Returns C/A - (B/A)**2 where A, B, C are the zeroth, first and second
    moments of the coordinate. Companion to av2, which returns the mean B/A.
    """
    # BUG FIX: was np.indices(Z.shape, double) -- ``double`` is undefined in
    # this scope; use Z.dtype as the companion function av2 does.
    o = np.indices(Z.shape, Z.dtype)[index]
    o1, o2 = np.indices(Z.shape, Z.dtype)[-2:]
    # Combine weights in log space and shift so the largest weight lands near
    # the top of the double range before exponentiating (avoids underflow).
    lW = np.log(Z) + o1 * np.log(w1) + o2 * np.log(w2)
    W = np.exp(lW - lW.max() + np.log(1e300))
    A = (W).sum(-1).sum(-1)
    B = (W * o).sum(-1).sum(-1)
    C = (W * o ** 2).sum(-1).sum(-1)
    return C/A - (B/A)**2
Example #30
0
def test_mix_types():
    "[watershed regression]: Mixing types of surface and marker arrays used to cause crash"
    # uint16 surface with int64 markers -- the dtype mix is the regression.
    surface = np.zeros((64, 64), np.uint16)
    rows, cols = np.indices(surface.shape)
    surface += (cols ** 2).astype(np.uint16)
    surface += ((rows - 23) ** 2).astype(np.uint16)
    seeds = np.zeros((64, 64), np.int64)
    seeds[32, 32] = 1
    # This call used to force a crash (at least in debug mode).
    regions, lines = mahotas.cwatershed(surface, seeds, return_lines=1)