import numpy as np
import pandas as pd

def get_example_regions(Page_Dist, Our_Docs, F, Per_Small, Per_Large0):
	Cut = 3
	ind0 = np.random.randint(0, len(F))  # round(uniform(0, len(F))) could return len(F), one past the end
	print(ind0)
	tA = F.tran_mat_index1.tolist()[int(ind0)]
	tB = F.tran_mat_index2.tolist()[int(ind0)]
	TFA = (Page_Dist[tA,:]<Per_Small) & (Page_Dist[tB,:]>Per_Large0)
	indexes=np.indices(Page_Dist.shape)
	indA = indexes[1,0,TFA]
	indApd=pd.DataFrame({'tran_mat_index':indA})
	indApd['Region'] = 'A'
	indApd['dist_to_A'] = Page_Dist[tA,indA] 
	indApd['dist_to_B'] = Page_Dist[tB,indA]
	indApd = indApd.sort_values(by='dist_to_A')  # sort_index(by=...) was removed from pandas
	indApd = indApd.iloc[:Cut]
	TFB = (Page_Dist[tB,:]<Per_Small) & (Page_Dist[tA,:]>Per_Large0)
	indexes=np.indices(Page_Dist.shape)
	indB =indexes[1,0,TFB]
	indBpd=pd.DataFrame({'tran_mat_index':indB})
	indBpd['Region'] = 'B'
	indBpd['dist_to_A'] = Page_Dist[tA,indB] 
	indBpd['dist_to_B'] = Page_Dist[tB,indB]
	indBpd = indBpd.sort_values(by='dist_to_B')
	indBpd = indBpd.iloc[:Cut]
	Info = pd.concat([indApd,indBpd])
	Info = join_replace(Info,Our_Docs[['usid_index','tran_mat_index','year','parties']],'tran_mat_index')
	MN = np.min(np.array([np.sum(TFA),np.sum(TFB)]))
	if MN < Cut: print("Warning: fewer than %d pages satisfy the region criteria" % Cut)
	print(Info.to_string())
	return Info
Example #2
	def __init__(self, Nm, Nf, L, qpf = 3):
		self.Nm = Nm
		self.Nf = Nf
		self.L = L
		self.qpf = qpf

		self.mass_res = L / Nm
		self.force_res = L / Nf

		self.X = np.indices((Nm, Nm)).astype(float)
		self.MX = self.X * self.mass_res

		# subdivide mass elements
		self.XX = (self.X.transpose([1,2,0]).reshape([Nm**2,2])[:,np.newaxis,:] + \
				subdiv_unitcell[self.qpf]).reshape([Nm**2 * 2**(2*self.qpf), 2])

		self.FX = np.indices((Nf, Nf)).astype(float) * self.force_res

		# k-values, divide by resolution to get physical scales
		self.Km = self.make_K(Nm)
		self.km2 = (self.Km**2).sum(axis=0)
		self.km2[0, 0] = 1

		self.Kf = self.make_K(Nf)
		self.kf2 = (self.Kf**2).sum(axis=0)
		self.kf2[0, 0] = 1
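The make_K helper is not shown in this excerpt. A minimal sketch consistent with how Km and km2 are used above (a stack of per-axis k-components in FFT ordering, matching the fftfreq call in the Zeldovich example further below) might be:

import numpy as np

def make_K(N):
	# hypothetical reconstruction: angular frequencies per axis on an N x N grid
	k1d = np.fft.fftfreq(N, 1.0 / (2 * np.pi))
	return np.array(np.meshgrid(k1d, k1d, indexing='ij'))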
Example #3
import math
import numpy
from matplotlib.pyplot import quiver

def shear_map(e1,e2,nx):

    n=e1.shape[0]
    field=numpy.zeros((nx,nx),dtype=numpy.double)
    x=numpy.double(numpy.indices([nx,nx])[0])+0.5
    y=numpy.double(numpy.indices([nx,nx])[1])+0.5

    fact=5.

    #matshow(field)
    eps=1.e-9
    
    etot=numpy.sqrt(e1**2+e2**2)
    
    phi=numpy.zeros((nx,nx),dtype=double)
    
    for l in range(nx):
        for k in xrange(nx):
            if (etot[l,k]>0):
                phi[l,k]=(math.acos(e1[l,k]/etot[l,k])*e2[l,k]/numpy.abs(e2[l,k]))/2.

    fct=5

    u=fct*etot*numpy.cos(phi)
    v=fct*etot*numpy.sin(phi)
    
    #u=1+numpy.zeros((nx,nx),dtype=double)
    #v=0.5+numpy.zeros((nx,nx),dtype=double)

    width=1
    Q=quiver(x,y,u,v,pivot='middle',units='width',headlength=0,headwidth=0,color='k')
Example #4
def binary_mask_multiple(coords_rel, shape, radius, include_edge=True,
                         return_masks=False):
    """Creates multiple elliptical masks.

    Parameters
    ----------
    coords_rel : ndarray (N x 2 or N x 3)
        coordinates
    shape : tuple
        shape of the image
    radius : number or tuple of number
        size of the masks
    """
    ndim = len(shape)
    radius = validate_tuple(radius, ndim)
    coords_rel = np.atleast_2d(coords_rel)

    if include_edge:
        dist = [np.sum(((np.indices(shape).T - coord) / radius)**2, -1) <= 1
                for coord in coords_rel]
    else:
        dist = [np.sum(((np.indices(shape).T - coord) / radius)**2, -1) < 1
                for coord in coords_rel]
    mask_total = np.any(dist, axis=0).T
    if return_masks:
        masks_single = np.empty((len(coords_rel), mask_total.sum()), dtype=bool)  # np.bool was removed from NumPy
        for i, _dist in enumerate(dist):
            masks_single[i] = _dist.T[mask_total]
        return mask_total, masks_single
    else:
        return mask_total
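A quick usage sketch; validate_tuple is not defined in this excerpt, so the stand-in below simply broadcasts a scalar radius to one value per dimension:

import numpy as np

def validate_tuple(value, ndim):
    # hypothetical stand-in for the missing helper
    if np.isscalar(value):
        return (value,) * ndim
    return tuple(value)

coords = np.array([[10.0, 12.0], [30.0, 30.0]])
mask = binary_mask_multiple(coords, shape=(48, 48), radius=5)
print(mask.shape, mask.sum())  # (48, 48) boolean image; True inside either disc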
Example #5
def prepare_subimage(coords, image, radius, noise_size=None, threshold=None):
    ndim = image.ndim
    radius = validate_tuple(radius, ndim)
    # slice region around cluster
    im, origin = slice_image(coords, image, radius)
    if origin is None:   # coordinates are out of image bounds
        raise RefineException

    # do lowpass filter
    if noise_size is not None:
        if threshold is None:
            threshold = 0
        im = lowpass(im, noise_size, threshold)

    # include the edges where dist == 1 exactly
    dist = [(np.sum(((np.indices(im.shape).T - (coord - origin)) / radius)**2, -1) <= 1)
            for coord in coords]

    # to mask the image
    mask_total = np.any(dist, axis=0).T
    # to mask the masked image
    masks_singles = np.empty((len(coords), mask_total.sum()), dtype=bool)
    for i, _dist in enumerate(dist):
        masks_singles[i] = _dist.T[mask_total]

    # create the coordinates
    mesh = np.indices(im.shape, dtype=np.float64)[:, mask_total]
    # translate so that coordinates are in image coordinates
    mesh += np.array(origin)[:, np.newaxis]

    return im[mask_total].astype(np.float64), mesh, masks_singles
Example #6
	def merge_transient(self, other_map):
		"""
		Like merge, but only makes a transient change
		"""
		assert isinstance(other_map, Map)
		assert self.resolution == other_map.resolution
		assert self.frame == other_map.frame
		assert self.orient == other_map.orient

		# Add the new map update to our map.

		# apply the local map
		mask = other_map.grid > 0
		i0, j0 = self.index_at(other_map.pos_at(0,0))
		i, j = np.indices(other_map.grid.shape)[:,mask]
		self.grid[i + i0,j + j0] += occupancy_weight # mark the newly observed occupied cells

		# Check to see if we should make a laserscan diff
		if self.last_map_update:
			# Subtract the old laserscan data
			mask = self.last_map_update.grid > 0
			i0, j0 = self.index_at(self.last_map_update.pos_at(0,0))
			i, j = np.indices(self.last_map_update.grid.shape)[:,mask]
			self.grid[i + i0,j + j0] -= occupancy_weight # All occupied cells are now only slightly occupied

		# Save the old laserscan data
		self.last_map_update = other_map
Example #7
    def coordinates(self, coord_type='skycoord', origin=0, mode='center'):
        """
        Sky coordinate images.

        Parameters
        ----------
        coord_type : {'pix', 'skycoord', 'galactic'}
            Which type of coordinates to return.
        origin : {0, 1}
            Pixel coordinate origin.
        mode : {'center', 'edges'}
            Return coordinate values at the pixels edges or pixel centers.
        """
        if mode == 'center':
            y, x = np.indices(self.data.shape)
        elif mode == 'edges':
            shape = self.data.shape[0] + 1, self.data.shape[1] + 1
            y, x = np.indices(shape)
            y, x = y - 0.5, x - 0.5
        else:
            raise ValueError('Invalid mode to compute coordinates.')

        if coord_type == 'pix':
            return x, y
        else:
            coordinates = pixel_to_skycoord(x, y, self.wcs, origin)
            if coord_type == 'skycoord':
                return coordinates
            elif coord_type == 'galactic':
                l = coordinates.galactic.l.wrap_at('180d')
                b = coordinates.galactic.b
                return l, b
            else:
                raise ValueError("Not a valid coordinate type. Choose either"
                                 " 'pix' or 'skycoord'.")
Example #8
def simpleCentroid(img, threshold_frac=0, **kwargs):
    '''
    Centroids an image, or an array of images.
    Centroids over the last 2 dimensions.
    Sets all values under "threshold_frac*max_value" to zero before centroiding
    '''
    if threshold_frac!=0:
        if len(img.shape)==2:
            img = numpy.where(img>threshold_frac*img.max(), img, 0 )
        else:
            img_temp = (img.T - threshold_frac*img.max(-1).max(-1)).T
            zero_coords = numpy.where(img_temp<0)
            img[zero_coords] = 0

    if len(img.shape)==2:
        y_cent,x_cent = numpy.indices(img.shape)
        y_centroid = (y_cent*img).sum()/img.sum()
        x_centroid = (x_cent*img).sum()/img.sum()

    else:
        y_cent, x_cent = numpy.indices((img.shape[-2],img.shape[-1]))
        y_centroid = (y_cent*img).sum(-1).sum(-1)/img.sum(-1).sum(-1)
        x_centroid = (x_cent*img).sum(-1).sum(-1)/img.sum(-1).sum(-1)

    y_centroid+=0.5
    x_centroid+=0.5

    return numpy.array([y_centroid,x_centroid])
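A minimal check of the 2-D branch:

import numpy

img = numpy.zeros((5, 5))
img[2, 3] = 1.0
print(simpleCentroid(img))  # -> [2.5 3.5]: the bright pixel plus the 0.5-pixel offset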
Example #9
    def coordinates(self, coord_type='world', origin=0, mode='center'):
        """
        Sky coordinate images.

        Parameters
        ----------
        coord_type : {'world', 'pix', 'skycoord'}
            Which type of coordinates to return.
        origin : {0, 1}
            Pixel coordinate origin.
        mode : {'center', 'edges'}
            Return coordinate values at the pixels edges or pixel centers.
        """
        if mode == 'center':
            y, x = np.indices(self.data.shape)
        elif mode == 'edges':
            shape = self.data.shape[0] + 1, self.data.shape[1] + 1 
            y, x = np.indices(shape)
            y, x = y - 0.5, x - 0.5
        else:
            raise ValueError('Invalid mode to compute coordinates.')
        
        if coord_type == 'pix':
            return x, y
        else:
            xsky, ysky = self.wcs.wcs_pix2world(x, y, origin)
            l, b = Longitude(xsky, unit='deg'), Latitude(ysky, unit='deg')
            l = l.wrap_at('180d')
            if coord_type == 'world':
                return l.degree, b.degree
            elif coord_type == 'skycoord': 
                return l, b
            else:
                raise ValueError("Not a valid coordinate type. Choose either"
                                 " 'world', 'pix' or 'skycoord'.")
Example #10
def directionality_filter(filtered, angle=10, combine=True):
    """
    Finds the maximum filter response for each pixel.

    Returns the maximum filter response and the angle of maximum response.

    """
    f2 = np.power(filtered, 2)

    n_angles = int(180 / angle)
    n_freqs = int(filtered.shape[2] / n_angles)

    if combine:
        f2_combined = np.dstack([f2[:, :, i::n_angles].sum(axis=2)
                                 for i in range(n_angles)])  # np.dstack no longer accepts a bare generator
        max_angle_idx = np.argmax(f2_combined, axis=2)
        x, y = np.indices(max_angle_idx.shape)
        magnitude = f2[x, y, max_angle_idx]

        angles = np.arange(0, 180, angle)
        max_angles = angles[max_angle_idx]
    else:
        angles = np.hstack(list(np.arange(0, 180, angle)
                                for f in range(n_freqs)))
        idx = np.argmax(filtered, axis=2)
        x, y = np.indices(idx.shape)
        magnitude = f2[x, y, idx]

        max_angles = angles[idx]

    magnitude = magnitude / np.mean(f2, axis=2)
    return magnitude, max_angles
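A small smoke test, assuming random responses for 18 angle bins at 10-degree spacing and a single frequency:

import numpy as np

filtered = np.random.rand(8, 8, 18)
magnitude, max_angles = directionality_filter(filtered, angle=10)
print(magnitude.shape, max_angles.min(), max_angles.max())  # (8, 8); angles lie in [0, 170]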
Example #11
def generate_result_maps(data, sourcelist):
    """Return a source and residual image

    Given a data array (image) and list of sources, return two images, one
    showing the sources themselves and the other the residual after the
    sources have been removed from the input data.
    """
    residual_map = numpy.array(data) # array constructor copies by default
    gaussian_map = numpy.zeros(residual_map.shape)
    for src in sourcelist:
        # Include everything with 6 times the std deviation along the major
        # axis. Should be very very close to 100% of the flux.
        box_size = 6 * src.smaj.value / math.sqrt(2 * math.log(2))

        lower_bound_x = max(0, int(src.x.value - 1 - box_size))
        upper_bound_x = min(residual_map.shape[0], int(src.x.value - 1 + box_size))
        lower_bound_y = max(0, int(src.y.value - 1 - box_size))
        upper_bound_y = min(residual_map.shape[1], int(src.y.value - 1 + box_size))

        local_gaussian = gaussian(
            src.peak.value,
            src.x.value,
            src.y.value,
            src.smaj.value,
            src.smin.value,
            src.theta.value
        )(
            numpy.indices(residual_map.shape)[0,lower_bound_x:upper_bound_x,lower_bound_y:upper_bound_y],
            numpy.indices(residual_map.shape)[1,lower_bound_x:upper_bound_x,lower_bound_y:upper_bound_y]
        )

        gaussian_map[lower_bound_x:upper_bound_x, lower_bound_y:upper_bound_y] += local_gaussian
        residual_map[lower_bound_x:upper_bound_x, lower_bound_y:upper_bound_y] -= local_gaussian

    return gaussian_map, residual_map
Example #12
def test_psi_continuous():
    # first make perfect prediction, including pairwise part
    X, Y = toy.generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
    x, y = X[0], Y[0]
    n_states = x.shape[-1]

    pw_horz = -1 * np.eye(n_states)
    xx, yy = np.indices(pw_horz.shape)
    # linear ordering constraint horizontally
    pw_horz[xx > yy] = 1

    # high cost for unequal labels vertically
    pw_vert = -1 * np.eye(n_states)
    pw_vert[xx != yy] = 1
    pw_vert *= 10

    # create crf, assemble weight, make prediction
    crf = DirectionalGridCRF(n_states=3, inference_method='lp')
    w = np.hstack([np.ones(3), -pw_horz.ravel(), -pw_vert.ravel()])
    y_pred = crf.inference(x, w, relaxed=True)

    # compute psi for prediction
    psi_y = crf.psi(x, y_pred)
    assert_equal(psi_y.shape, (crf.size_psi,))
    # first unary, then horizontal, then vertical
    unary_psi = crf.get_unary_weights(psi_y)
    pw_psi_horz, pw_psi_vert = crf.get_pairwise_weights(psi_y)

    # test unary
    xx, yy = np.indices(y.shape)
    assert_array_almost_equal(unary_psi,
                              np.bincount(y.ravel(), x[xx, yy, y].ravel()))
Example #13
    def __init__(self, param):

        # create spatial sources
        num_grid = param.get('gridpoints', 9)
        pixel = np.indices(param['shape'])
        p_dist = param['shape'][0] / num_grid
        self.points = np.indices((num_grid, num_grid)) * p_dist + p_dist
        self.points = list(zip(self.points[0].flatten(), self.points[1].flatten()))  # list() so shuffle works in Python 3
        random.shuffle(self.points)
        components = [gaussian_influence(mu, param['width'])(pixel[0], pixel[1])
                  for mu in self.points[:param['latents']]]
        self.spt_sources = np.array([i.flatten() for i in components])

        # generate activation timcourses
        covgroups = param.get('covgroups', 4)
        self.cov = group_covmtx(param['cov'], 0.1, covgroups, param['latents'] / covgroups)
        marginal_dist = adjusted_gamma(param['mean'], param['var'])
        self.activ_pre = correlated_samples(self.cov, param['no_samples'],
                                             marginal_dist).T
        self.activ_pre[np.isnan(self.activ_pre)] = 0
        # fold with single stim timecourse
        if param['act_time']:
            self.activation = np.vstack([np.outer(i, param['act_time']).flatten()
                                    for i in self.activ_pre]).T
        else:
            self.activation = self.activ_pre  # no stimulus timecourse to fold with
        self.observed_raw = np.dot(self.activation, self.spt_sources)

        # add noise
        noise = param['noisevar'] * np.random.randn(*self.observed_raw.shape)
        self.observed = self.observed_raw.copy() + noise
Example #14
    def vmax_discretized(self, Value, s, xij, i, j):

        nx = self.dims.nx
        ns = s.shape[-1]
        dx = self.dims.dx
        X = self.options.X

        vv = np.full((nx, ns), -np.inf)

        xl, xu = self.bounds(s, i, j)
        xl = xl.T
        xu = xu.T

        for h, x0 in enumerate(X.T):
            is_= np.all((xl <= x0) & (x0 <= xu), 1)
            if np.any(is_):
                xx = np.repeat(x0, ns, 0)
                vv[h, is_] = self.__Bellman_rhs_discrete(Value, xx, s, i, j)

        xmax = np.argmax(vv, 0)

        vxs = [a[0] for a in np.indices(vv.shape)]  # the [0] reduces one dimension
        vxs[0] = xmax
        vij = vv[tuple(vxs)]  # indexing with a plain list of arrays is an error in modern NumPy

        xxs = [a[0] for a in np.indices(X.T.shape)]
        xxs[0] = xmax
        xij[:] = X.T[tuple(xxs)]

        return vij
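The indices/argmax trick above (replace the first index grid with the argmax positions) can be checked against np.take_along_axis:

import numpy as np

vv = np.random.rand(4, 6)
xmax = np.argmax(vv, 0)
vxs = [a[0] for a in np.indices(vv.shape)]
vxs[0] = xmax
vij = vv[tuple(vxs)]
assert np.allclose(vij, np.take_along_axis(vv, xmax[None, :], axis=0)[0])
assert np.allclose(vij, vv.max(0))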
    def get_resources(self, data_dictionary, dataset):
        """Create resources for computing a variable. """
        resources=Resources()
        for key in data_dictionary.keys():
            if key in self.datasets:
                data = data_dictionary[key]
                if self.id_names[key] not in data_dictionary[key] and not isinstance(self.id_names[key], list):
                    first_column = next(iter(data_dictionary[key]))  # dict keys are not indexable in Python 3
                    data[self.id_names[key]] = arange(1,
                        len(data_dictionary[key][first_column])+1) # add id array
                
                if key == "land_cover":
                    land_cover_storage = StorageFactory().get_storage('dict_storage')
                    land_cover_table_name = 'land_cover'
                    land_cover_storage.write_table(
                            table_name=land_cover_table_name,
                            table_data=data,
                        )

                    lc = LandCoverDataset(
                        in_storage=land_cover_storage, 
                        in_table_name=land_cover_table_name, 
                        )
                        
                    # add relative_x and relative_y
                    lc.get_id_attribute()
                    n = int(ceil(sqrt(lc.size())))
                    
                    if "relative_x" not in data.keys():
                        x = (indices((n,n))+1)[1].ravel()
                        lc.add_attribute(x[0:lc.size()], "relative_x", metadata=1)
                    if "relative_y" not in data.keys():
                        y = (indices((n,n))+1)[0].ravel()
                        lc.add_attribute(y[0:lc.size()], "relative_y", metadata=1)
                        
                    resources.merge({key: lc})
                    
                if key == "gridcell":
                    gridcell_storage = StorageFactory().get_storage('dict_storage')
                    gridcell_table_name = 'gridcell'
                    gridcell_storage.write_table(
                            table_name=gridcell_table_name,
                            table_data=data,
                        )
                    
                    gridcell_dataset = GridcellDataset(
                        in_storage = gridcell_storage,
                        in_table_name = gridcell_table_name,
                        )
                    
                    resources.merge({key: gridcell_dataset})
            else:
                resources.merge({key:data_dictionary[key]})

        if dataset in self.interactions:
            pass
        else:
            resources.merge({"dataset": resources[dataset]})
        resources.merge({"check_variables":'*', "debug":4})
        return resources
Example #16
 def prep_image(self):
   """Takes the solved coordinate system and makes a piecewise \
   transform on the origin image to the target image"""
   transform = ProjectiveTransform()
   self.coord_solver.coordinates = self.coord_solver.min_coords.copy()
   self.new_image = np.zeros(self.coord_solver.image.shape)
   coords = np.array([self.coord_solver.coordinates[x:x+2, y:y+2, :].reshape([4, 2]) for x in \
     range(self.coord_solver.coordinates.shape[0]) for y in range(self.coord_solver.coordinates.shape[1]) \
     if (self.coord_solver.coordinates[x:x+2, y:y+2, :].shape == (2, 2, 2))])
   canonical_coords = np.indices((self.coord_solver.width, self.coord_solver.height)).T.astype('float32')
   flattened_canonical = np.array([canonical_coords[x:x+2, y:y+2, :].reshape([4, 2]) for x in \
     range(canonical_coords.shape[0]-1) for y in range(canonical_coords.shape[1]-1)])
   mesh_size = self.coord_solver.mesh_factor
   print "needs %s calcs" % coords.shape[0]
   coord_grid = np.indices(self.coord_solver.image.shape[:-1]).T.astype('float32').reshape(-1,2)
   for k in range(coords.shape[0]):
     des = mesh_size*coords[k, :, :]
     canon_coord = mesh_size*flattened_canonical[k, :, :]
     src = mesh_size*flattened_canonical[0, :, :]
     if not transform.estimate(des, canon_coord):
       raise Exception("estimate failed at %s" % str(k))
     area_in_question_x = canon_coord[0, 0].astype(int)
     area_in_question_y = canon_coord[0, 1].astype(int)
     scaled_area = tf.warp(self.coord_solver.image, transform)
     area_path = path.Path([des[0],des[1],des[3],des[2],des[0]])
     points_in_area = area_path.contains_points(coord_grid,radius=0.00001).reshape(self.coord_solver.image.shape[:-1])
     self.new_image += scaled_area*points_in_area[:,:,np.newaxis]
 def __init__(self, image=None, mesh_factor=14, density_distribution=None):
   super(CoordinateSolver, self).__init__()
   self.image = image
   self.height, self.width, _ = self.image.shape
   self.mesh_factor = mesh_factor
   self.height //= self.mesh_factor  # integer division: these become array shapes below
   self.width //= self.mesh_factor
   self.image = self.image[:self.mesh_factor*self.height, :self.mesh_factor*self.width]
   if type(density_distribution) == np.ndarray:
     restricted_density = density_distribution[:self.mesh_factor*self.height, :self.mesh_factor*self.width]
     target_areas = restricted_density
     target_areas = target_areas[:-1, :-1]
   else:
     target_areas = np.indices((self.width-1, self.height-1)).T.astype('float32')
     target_areas = norm.pdf(target_areas[:, :, 0], self.width/2, self.width/5)\
                   *norm.pdf(target_areas[:, :, 1], self.height/2, self.height/5)
   target_areas /= sum(sum(target_areas))
   
   normalisation_factor = (self.height-1)*(self.width-1)
   target_areas_normalised = target_areas*normalisation_factor
   self.padded_targets = np.zeros([self.height+1, self.width+1])
   self.padded_targets[1:-1, 1:-1] = target_areas_normalised
   self.coordinates = np.indices((self.width, self.height)).T.astype('float32')
   self.total_error = (self.height-1)*(self.width-1)
   
   self.min_coords = self.coordinates.copy()
   self.areas = calculate_areas(self.coordinates)
   self.errors = np.zeros(self.padded_targets.shape)
   self.x_weights = np.ones([self.height*self.width, self.height + 1, self.width + 1])
   self.y_weights = np.ones([self.height*self.width, self.height + 1, self.width + 1])
   self.make_weights()
Example #18
import numpy

def find_mode(ndarray,axis=0):
    if ndarray.size == 1:
        return ndarray[0]
    elif ndarray.size == 0:
        raise Exception('Attempted to find mode on an empty array!')
    try:
        axis = [i for i in range(ndarray.ndim)][axis]
    except IndexError:
        raise Exception('Axis %i out of range for array with %i dimension(s)' % (axis,ndarray.ndim))

    srt = numpy.sort(ndarray,axis=axis)
    dif = numpy.diff(srt,axis=axis)
    shape = [i for i in dif.shape]
    shape[axis] += 2
    indices = numpy.indices(shape)[axis]
    index = tuple([slice(None) if i != axis else slice(1,-1) for i in range(dif.ndim)])
    indices[index][dif == 0] = 0
    indices.sort(axis=axis)
    bins = numpy.diff(indices,axis=axis)
    location = numpy.argmax(bins,axis=axis)
    mesh = numpy.indices(bins.shape)
    index = tuple([slice(None) if i != axis else 0 for i in range(dif.ndim)])
    index = [mesh[i][index].ravel() if i != axis else location.ravel() for i in range(bins.ndim)]
    #counts = bins[tuple(index)].reshape(location.shape)
    index[axis] = indices[tuple(index)]
    modals = srt[tuple(index)].reshape(location.shape)
    mode = modals[()]
    return mode
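For example:

a = numpy.array([1, 2, 2, 3, 2, 4])
print(find_mode(a))  # -> 2, the most frequent value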
Example #19
def centreOfGravity(img, threshold=0, **kwargs):
    '''
    Centroids an image, or an array of images.
    Centroids over the last 2 dimensions.
    Sets all values under "threshold*max_value" to zero before centroiding
    Origin at 0,0 index of img.

    Parameters:
        img (ndarray): ([n, ]y, x) 2d or greater rank array of imgs to centroid
        threshold (float): Percentage of max value under which pixels set to 0

    Returns:
        ndarray: Array of centroid values (2[, n])

    '''
    if threshold!=0:
        if len(img.shape)==2:
            img = numpy.where(img>threshold*img.max(), img, 0 )
        else:
            img_temp = (img.T - threshold*img.max(-1).max(-1)).T
            zero_coords = numpy.where(img_temp<0)
            img[zero_coords] = 0

    if len(img.shape)==2:
        y_cent,x_cent = numpy.indices(img.shape)
        y_centroid = (y_cent*img).sum()/img.sum()
        x_centroid = (x_cent*img).sum()/img.sum()

    else:
        y_cent, x_cent = numpy.indices((img.shape[-2],img.shape[-1]))
        y_centroid = (y_cent*img).sum(-1).sum(-1)/img.sum(-1).sum(-1)
        x_centroid = (x_cent*img).sum(-1).sum(-1)/img.sum(-1).sum(-1)

    return numpy.array([x_centroid, y_centroid])
Example #20
def _get_k(input_array, box_dims):
	'''
	Get the k values for input array with given dimensions.
	Return k components and magnitudes.
	For internal use.
	'''
	dim = len(input_array.shape)
	if dim == 1:
		x = np.arange(len(input_array))
		center = x.max()/2.
		kx = 2.*np.pi*(x-center)/box_dims[0]
		return [kx], kx
	elif dim == 2:
		x,y = np.indices(input_array.shape, dtype='int32')
		center = np.array([(x.max()-x.min())/2, (y.max()-y.min())/2])
		kx = 2.*np.pi * (x-center[0])/box_dims[0]
		ky = 2.*np.pi * (y-center[1])/box_dims[1]
		k = np.sqrt(kx**2 + ky**2)
		return [kx, ky], k
	elif dim == 3:
		x,y,z = np.indices(input_array.shape, dtype='int32')
		center = np.array([(x.max()-x.min())/2, (y.max()-y.min())/2, \
						(z.max()-z.min())/2])
		kx = 2.*np.pi * (x-center[0])/box_dims[0]
		ky = 2.*np.pi * (y-center[1])/box_dims[1]
		kz = 2.*np.pi * (z-center[2])/box_dims[2]

		k = get_eval()('(kx**2 + ky**2 + kz**2 )**(1./2.)') 		
		return [kx,ky,kz], k
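Usage on a 2-D array, with box_dims giving the physical size of each axis:

import numpy as np

arr = np.zeros((8, 8))
[kx, ky], k = _get_k(arr, box_dims=[100.0, 100.0])
print(kx.shape, k.shape)  # two (8, 8) k-components and the (8, 8) magnitude grid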
Example #21
	def __init__(self, dim=2, N=256, n=-2.5, t=None, seed=None, scale=1, name="zeldovich approximation"):
		super(Zeldovich, self).__init__(name=name)

		if seed is not None:
			np.random.seed(seed)
		#sys.exit(0)
		shape = (N,) * dim
		A = np.random.normal(0.0, 1.0, shape)
		F = np.fft.fftn(A)
		K = np.fft.fftfreq(N, 1./(2*np.pi))[np.indices(shape)]
		k = (K**2).sum(axis=0)
		k_max = np.pi
		F *= np.where(np.sqrt(k) > k_max, 0, np.sqrt(k**n) * np.exp(-k*4.0))
		F.flat[0] = 0
		#pylab.imshow(np.where(sqrt(k) > k_max, 0, np.sqrt(k**-2)), interpolation='nearest')
		grf = np.fft.ifftn(F).real
		Q = np.indices(shape) / float(N-1) - 0.5
		s = np.array(np.gradient(grf)) / float(N)
		#pylab.imshow(s[1], interpolation='nearest')
		#pylab.show()
		s /= s.max() * 100.
		#X = np.zeros((4, 3, N, N, N))
		#for i in range(4):
		#if t is None:
		#	s = s/s.max()
		t = t or 1.
		X = Q + s * t

		for d, name in zip(list(range(dim)), "xyzw"):
			self.add_column(name, X[d].reshape(-1) * scale)
		for d, name in zip(list(range(dim)), "xyzw"):
			self.add_column("v"+name, s[d].reshape(-1) * scale)
		for d, name in zip(list(range(dim)), "xyzw"):
			self.add_column(name+"0", Q[d].reshape(-1) * scale)
		return
Example #22
    def polar_workspace_init(radial_bins=256, angular_bins=256,
                             max_radius=None, centre=None): 
        #if (centre == None) and self.centre == None:
            #pass # Raise an exception

        xdim = self.image.shape[0]
        ydim = self.image.shape[1]

        if centre is None:
            xc = xdim * 0.5
            yc = ydim * 0.5 
        else:
            xc = centre[0]
            yc = centre[1]

        # Calculate minimum distance from centre to edge of image - this
        # determines the maximum radius in the polar image
        xsize = min(xdim + 0.5 - xc, xc)
        ysize = min(ydim + 0.5 - yc, yc)
        max_rad = math.sqrt(xsize**2 + ysize**2)

        if max_radius is None:
            max_radius = max_rad
        elif max_radius > max_rad:
            raise ValueError
        
        # Set up interpolation - cubic spline with no smoothing by default
        x = numpy.arange(xdim) + 0.5 - xc  # 1-D coordinate arrays; indices((xdim,)) is 2-D
        y = numpy.arange(ydim) + 0.5 - yc
        interp = spint.RectBivariateSpline(x, y, self.image)

        # Polar image bin widths
        theta_bin_width = (2.0 * math.pi) / (angular_bins - 1.0)
        radial_bin_width = max_radius / (radial_bins - 1.0)

        # Calculate polar image values - use vectorization for efficiency
        # Because we broadcast when using a ufunc (created by frompyfunc
        # below), we could get away with an ogrid here to save time and space?
        r, theta = numpy.mgrid[0:radial_bins, 0:angular_bins]
        theta = (theta + 0.5) * theta_bin_width
        r = (r + 0.5) * radial_bin_width

        def polar_pix_val(r, theta):
            # math.sin/cos are fine here: frompyfunc feeds scalars
            return interp.ev(r * math.sin(theta), r * math.cos(theta))

        polar_pix_val = numpy.frompyfunc(polar_pix_val, 2, 1)
        self.pimage = polar_pix_val(r, theta)

        # Calculate polar image values - equivalent non-vectorized version
        self.pimage = numpy.empty((radial_bins, angular_bins))
        for r in range(radial_bins):
            R = (r + 0.5) * radial_bin_width
            for t in range(angular_bins):
                theta = (t + 0.5) * theta_bin_width
                x = R * math.sin(theta)
                y = R * math.cos(theta)
                self.pimage[r, t] = interp.ev(x, y)
Example #23
def dilate(cube, mask, objects, cathead, Parameters):
    dilateThreshold = Parameters["parameters"]["dilateThreshold"]
    dilatePixMax = Parameters["parameters"]["dilatePixMax"]
    dilateChan = Parameters["parameters"]["dilateChan"]
    # stops dilating when (flux_new-flux_old)/flux_new < dilateThreshold
    for mm in range(1, mask.max() + 1):
        obj = objects[mm - 1]
        xmin = obj[list(cathead).index("x_min")] - dilatePixMax
        xmax = obj[list(cathead).index("x_max")] + dilatePixMax
        ymin = obj[list(cathead).index("y_min")] - dilatePixMax
        ymax = obj[list(cathead).index("y_max")] + dilatePixMax
        zmin = obj[list(cathead).index("z_min")] - dilateChan
        zmax = obj[list(cathead).index("z_max")] + dilateChan
        xmin = max(0, xmin)
        xmax = min(xmax, cube.shape[2] - 1)
        ymin = max(0, ymin)
        ymax = min(ymax, cube.shape[1] - 1)
        zmin = max(0, zmin)
        zmax = min(zmax, cube.shape[0] - 1)
        objcube = cube[zmin : zmax + 1, ymin : ymax + 1, xmin : xmax + 1].copy()
        objmask = mask[zmin : zmax + 1, ymin : ymax + 1, xmin : xmax + 1].copy()
        allmask = mask[zmin : zmax + 1, ymin : ymax + 1, xmin : xmax + 1].copy()
        otherobjs = (allmask > 0) * (allmask != mm)
        if (otherobjs).sum():
            # Ensure that objects!=mm within dilatePixMax, dilateChan are not included in the flux growth calculation
            print "WARNING: object %i has possible overlapping objects within %i pix, %i chan" % (
                mm,
                dilatePixMax,
                dilateChan,
            )
            objcube[(allmask > 0) * (allmask != mm)] = 0
        fluxes = []
        for dil in range(dilatePixMax + 1):
            dd = dil * 2 + 1
            dilstruct = (np.sqrt(((np.indices((dd, dd)) - dil) ** 2).sum(axis=0)) <= dil).astype(int)
            dilstruct.resize((1, dilstruct.shape[0], dilstruct.shape[1]))
            dilstruct = dilstruct.repeat(dilateChan * 2 + 1, axis=0)
            fluxes.append(objcube[nd.morphology.binary_dilation(objmask == mm, structure=dilstruct)].sum())
            if dil > 0 and (fluxes[-1] - fluxes[-2]) / fluxes[-1] < dilateThreshold:
                break
        # pick the best dilation kernel for current object and update mask
        dil -= 1
        print "Mask dilation of source %i by %i pix and %i chan" % (mm, dil, dilateChan)
        sys.stdout.flush()
        dd = dil * 2 + 1
        dilstruct = (np.sqrt(((np.indices((dd, dd)) - dil) ** 2).sum(axis=0)) <= dil).astype(int)
        dilstruct.resize((1, dilstruct.shape[0], dilstruct.shape[1]))
        dilstruct = dilstruct.repeat(dilateChan * 2 + 1, axis=0)
        # Only grow the mask of object mm even when other objects are present in objmask
        objmask[nd.morphology.binary_dilation(objmask == mm, structure=dilstruct).astype(int) == 1] = mm
        # Put back in objmask objects!=mm that may have been inside objmask before dilation or may have been temporarily replaced by the dilated object mm
        if (otherobjs).sum():
            objmask[otherobjs] = allmask[otherobjs]
        mask[zmin : zmax + 1, ymin : ymax + 1, xmin : xmax + 1] = objmask
        del (objcube)
        del (objmask)
        del (allmask)
        del (otherobjs)
    return mask
Example #24
def level_image(im, poly_vert=1, poly_horiz=1, box=None, poly=None, mode="clip"):
    """Subtract a polynomial background from image

    Keyword Arguments:
        poly_vert (int): fit a polynomial in the vertical direction for the image of order
            given. If 0 do not fit or subtract in the vertical direction
        poly_horiz (int): fit a polynomial of order poly_horiz to the image. If 0 given
            do not subtract
        box (array, list or tuple of int): [xmin,xmax,ymin,ymax] define region for fitting. If None use entire
            image
        poly (list or None): [pvert, phoriz] pvert and phoriz are arrays of polynomial coefficients
            (highest power first) to subtract in the horizontal and vertical
            directions. If None function defaults to fitting its own polynomial.
        mode (str): Either 'clip' or 'norm' - specifies how to handle intensity values that end up being outside
            of the accepted range for the image.

    Returns:
        A new copy of the processed images.

    Fit and subtract a background to the image. Fits a polynomial of order
    given in the horizontal and vertical directions and subtracts. If box
    is defined then level the *entire* image according to the
    gradient within the box. The polynomial subtracted is added to the
    metadata as 'poly_vert_subtract' and 'poly_horiz_subtract'
    """
    if box is None:
        box = im.max_box
    cim = im.crop_image(box=box)
    (vertl, horizl) = cim.shape
    p_horiz = 0
    p_vert = 0
    if poly_horiz > 0:
        comp_vert = np.average(cim, axis=0)  # average (compress) the vertical values
        if poly is not None:
            p_horiz = poly[0]
        else:
            p_horiz = np.polyfit(np.arange(horizl), comp_vert, poly_horiz)  # fit to the horizontal
            av = np.average(comp_vert)  # get the average pixel height
            p_horiz[-1] = p_horiz[-1] - av  # maintain the average image height
        horizcoord = np.indices(im.shape)[1]  # now apply level to whole image
        for i, c in enumerate(p_horiz):
            im = im - c * horizcoord ** (len(p_horiz) - i - 1)
    if poly_vert > 0:
        comp_horiz = np.average(cim, axis=1)  # average the horizontal values
        if poly is not None:
            p_vert = poly[1]
        else:
            p_vert = np.polyfit(np.arange(vertl), comp_horiz, poly_vert)
            av = np.average(comp_horiz)
            p_vert[-1] = p_vert[-1] - av  # maintain the average image height
        vertcoord = np.indices(im.shape)[0]
        for i, c in enumerate(p_vert):
            im = im - c * vertcoord ** (len(p_vert) - i - 1)
    im.metadata["poly_sub"] = (p_horiz, p_vert)
    if mode == "clip":
        im = im.clip_intensity()  # saturate any pixels outside allowed range
    elif mode == "norm":
        im = im.normalise()
    return im
Example #25
def av2(Z, w1, w2, index = 0):
    o = np.indices(Z.shape, Z.dtype)[index]
    o1, o2 = np.indices(Z.shape, Z.dtype)[-2:]
    lW = np.log(Z) + o1 * np.log(w1) + o2 * np.log(w2)
    W = np.exp(lW - lW.max() + np.log(1e300))
    A = (W).sum(-1).sum(-1)
    B = (W * o).sum(-1).sum(-1)
    return B/A
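A quick sanity check: av2 returns the W-weighted mean of the index along axis `index`, with the weights computed in log space to avoid overflow. With uniform weights it reduces to the plain mean index:

import numpy as np

Z = np.ones((3, 4))
print(av2(Z, 1.0, 1.0))  # -> 1.0, the mean of row indices 0, 1, 2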
Example #26
def test_mix_types():
    f = np.zeros((64,64), np.uint16)
    f += (np.indices(f.shape)[1]**2).astype(np.uint16)
    f += ((np.indices(f.shape)[0]-23)**2).astype(np.uint16)
    markers = np.zeros((64,64), np.int64)
    markers[32,32] = 1
# Below used to force a crash (at least in debug mode)
    a,b = mahotas.cwatershed(f, markers, return_lines=1)
Example #27
 def level_image(self, poly_vert=1, poly_horiz=1, box=None, poly=None):
     """Subtract a polynomial background from image
     
     Fit and subtract a background to the image. Fits a polynomial of order
     given in the horizontal and vertical directions and subtracts. If box 
     is defined then level the *entire* image according to the 
     gradient within the box.
     
     Parameters
     ----------
     poly_vert: int
         fit a polynomial in the vertical direction for the image of order 
         given. If 0 do not fit or subtract in the vertical direction
     poly_horiz: int
         fit a polynomial of order poly_horiz to the image. If 0 given
         do not subtract
     box: array, list or tuple of int
          [xmin,xmax,ymin,ymax] define region for fitting. If None use entire
         image
     poly: list or None
         [pvert, phoriz] pvert and phoriz are arrays of polynomial coefficients
         (highest power first) to subtract in the horizontal and vertical 
         directions. If None function defaults to fitting its own polynomial.
         
     Returns
     -------
     im: KerrArray
         the levelled image
     """
     if box is None:
         box=self.max_box
     cim=self.crop_image(box=box)
     (vertl,horizl)=cim.shape
     if poly_horiz>0:
         comp_vert = np.average(cim, axis=0) #average (compress) the vertical values
         if poly is not None:
             p=poly[0]
         else:
             p=np.polyfit(np.arange(horizl),comp_vert,poly_horiz) #fit to the horizontal
             av=np.average(comp_vert) #get the average pixel height
             p[-1]=p[-1]-av #maintain the average image height
         horizcoord=np.indices(self.shape)[1] #now apply level to whole image 
         for i,c in enumerate(p):
             self=self-c*horizcoord**(len(p)-i-1)
          self.metadata['poly_horiz_subtract']=p
     if poly_vert>0:
         comp_horiz = np.average(cim, axis=1) #average the horizontal values
         if poly is not None:
             p=poly[1]
         else:
             p=np.polyfit(np.arange(vertl),comp_horiz,poly_vert)
              av=np.average(comp_horiz)
             p[-1]=p[-1]-av #maintain the average image height
         vertcoord=np.indices(self.shape)[0]
         for i,c in enumerate(p):
             self=self-c*vertcoord**(len(p)-i-1)
          self.metadata['poly_vert_subtract']=p
     return self
Example #28
def radial_average(input_array, box_dims, bins=10, weights=None):
	''' 
	Radially average data.
	Parameters:
		* input_array --- array containing the data to be averaged
		* box_dims  --- tuple with the size of the box in comoving Mpc along each axis
	kwargs:
		* bins = 10 --- the k bins. Can be an integer specifying the number of bins,
		or an array with the bin edges 
	Returns:
		Tuple containing the binned data and the bin edges
	'''
	
	if weights is not None:
		input_array *= weights

	#Make an array containing distances to the center
	dim = len(input_array.shape)
	if dim == 2:
		x,y = np.indices(input_array.shape)
		center = np.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0])
		kx = (x-center[0])/box_dims[0]
		ky = (y-center[1])/box_dims[1]
	elif dim == 3:
		x,y,z = np.indices(input_array.shape)
		center = np.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0, (z.max()-z.min())/2.0])
		kx = (x-center[0])/box_dims[0]
		ky = (y-center[1])/box_dims[1]
		kz = (z-center[2])/box_dims[2]

	else:
		raise Exception('Check your dimensions!')

	#Calculate k values
	if dim == 3:
		k = np.sqrt(kx**2 + ky**2 + kz**2 ) * 2.*np.pi
	else:
		k = np.sqrt(kx**2 + ky**2 ) * 2.*np.pi

	#If bins is an integer, make linearly spaced bins
	if isinstance(bins,int):
		kmin = 2.*np.pi/min(box_dims)
		bins = np.linspace(kmin, k.max(), bins+1)
	
	#Bin the data
	nbins = len(bins)-1
	dk = (bins[1:]-bins[:-1])/2.
	outdata = np.zeros(nbins)
	for ki in range(nbins):
		kmin = bins[ki]
		kmax = bins[ki+1]
		idx = (k >= kmin) * (k < kmax)
		outdata[ki] = np.mean(input_array[idx])

		if weights is not None:
			outdata[ki] /= weights[idx].mean()

	return outdata, bins[:-1]+dk
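For example:

import numpy as np

data = np.random.rand(32, 32)
profile, bin_centres = radial_average(data, box_dims=(100.0, 100.0), bins=8)
print(profile.shape, bin_centres.shape)  # 8 radially averaged values and their k-bin centres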
Example #29
def test_mix_types():
    "[watershed regression]: Mixing types of surface and marker arrays used to cause crash"
    f = np.zeros((64,64), np.uint16)
    f += (np.indices(f.shape)[1]**2).astype(np.uint16)
    f += ((np.indices(f.shape)[0]-23)**2).astype(np.uint16)
    markers = np.zeros((64,64), np.int64)
    markers[32,32] = 1
# Below used to force a crash (at least in debug mode)
    a,b = mahotas.cwatershed(f, markers, return_lines=1)
Example #30
def st2(Z, w1, w2, index = 0):
    o = np.indices(Z.shape, dtype=float)[index]
    o1, o2 = np.indices(Z.shape, dtype=float)[-2:]
    lW = np.log(Z) + o1 * np.log(w1) + o2 * np.log(w2)
    W = np.exp(lW - lW.max() + np.log(1e300))
    A = (W).sum(-1).sum(-1)
    B = (W * o).sum(-1).sum(-1)
    C = (W * o ** 2).sum(-1).sum(-1)
    return C/A - (B/A)**2
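And the matching check for st2, which returns the W-weighted variance of the same index:

import numpy as np

Z = np.ones((3, 4))
print(st2(Z, 1.0, 1.0))  # -> 0.666...: uniform weights give var([0, 1, 2]) = 2/3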
Example #31
import numpy as np

def calc_positional_encoding(sequence_dim, d_model):
    rows, cols = np.indices((sequence_dim, d_model))
    # standard sinusoidal encoding: the position is scaled *inside* sin/cos,
    # with the frequency set by the even column index
    angles = rows / (10_000**((2 * (cols // 2)) / d_model))
    return np.where(cols % 2, np.cos(angles), np.sin(angles))
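For example, rows are positions and columns are embedding dimensions:

import numpy as np

pe = calc_positional_encoding(sequence_dim=50, d_model=16)
print(pe.shape, pe[0, ::2])  # (50, 16); position 0 gives sin(0) = 0 in every even column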
Example #32
def getnoisedis(directories, path, catpath, deconvOffset=False, offset=False):
    import scipy.stats.mstats, scipy.optimize
    import pylab
    import glob
    import numpy as np

    from astropy.io import fits
    from astropy.io import ascii
    from tqdm import tqdm  # progress bar for the directory loop below
    import matplotlib.pyplot as plt
    import warnings
    import os

    tile = directories.split('/')[1]
    sig = np.loadtxt('./{}/{}/noise.txt'.format(path, tile))
    print(sig)
    # print (1/0.)

    tmpcat = np.loadtxt(catpath,
                        skiprows=1,
                        usecols=[i for i in np.arange(11, 43)])
    _ids = np.loadtxt(catpath, skiprows=1, usecols=[0])
    _ids = list(_ids)
    snrs = 10**((tmpcat[:, ::2] - tmpcat[:, 1::2]) / -2.5)
    fluxes = 10**((tmpcat[:, ::2] - 25) / -2.5)

    def getSNR(_id, idx, fluxes):
        fluxid = [8, 14, 15, 10, 11, 12, 13, 7, 6, 4, 1, 2, 0, 3]  # making sure to retrieve the right flux
        id_idx = _ids.index(_id)
        return fluxes[id_idx, fluxid[idx]]

    def _return_offseted_data(offsetsdata, filtname, data, dec=False):

        bands = ['subaru_IA427', 'subaru_B', 'subaru_IA484', 'subaru_IA505', 'subaru_IA527', 'subaru_V',\
         'subaru_IA624', 'subaru_rp', 'subaru_IA738', 'subaru_zp', 'ultravista_Y', 'ultravista_J', 'ultravista_H']

        if filtname == 'ultravista_Ks':
            dy, dx = [0, 0]
        else:
            idx_offset = bands[::-1].index(filtname)
            dy = offsetsdata['dy'].data[idx_offset]
            dx = offsetsdata['dx'].data[idx_offset]

        if dec:
            data = np.roll(data, int(dy), 0)
            data = np.roll(data, int(dx), 1)
        else:
            data = np.roll(data, int(dy * 3), 0)
            data = np.roll(data, int(dx * 3), 1)
        # plt.imshow(data, origin='lower')
        # plt.show()
        return data

    warnings.simplefilter("error", RuntimeWarning)
    for d in tqdm(glob.glob('./{}/'.format(path) + directories)[:]):
        _id = int(os.path.basename(d).split('_')[1].split('-')[1])
        # tile = d.split('/')[4][1:]

        hdu = fits.open('./{}/{}/watershed_segmaps/_id-{}.fits'.format(
            path, tile, _id))
        segmap = hdu[0].data
        hdu.close()

        if deconvOffset:
            dec_offsets = ascii.read('./{}/{}/offsets/_id-{}-dec.dat'.format(
                path, tile, int(_id)))
        if offset:
            offsets = ascii.read('./{}/{}/offsets/_id-{}.dat'.format(
                path, tile, int(_id)))

        for i, fdir in enumerate(sorted(glob.glob(d + '/*-*'))):  #*.0
            filtname = os.path.basename(fdir).split('_')[0].replace('-', '_')
            dataset = os.path.basename(fdir).split('-')[0]

            scalings = [10**(-6.4 / 2.5), 10**(-5 / 2.5)]
            if 'subaru' in dataset:
                scaling = scalings[0]
                gain = 3.
            else:
                gain = 4.
                scaling = scalings[1]

            hdu = fits.open(fdir + '/deconv_01.fits')
            data = hdu[0].data  #*scaling
            dec = data.copy()
            hdu.close()

            snr = getSNR(int(_id), i, fluxes)
            apermask = misc.createCircularMask(156,
                                               156,
                                               center=[156 / 2, 156 / 2],
                                               radius=21.)
            masked_img = data.copy()
            masked_img[~apermask] = 0
            fact = float(
                diagnostics.search_cfg(fdir + '/config.py'.format(tile[1:]),
                                       'IMG_FACT').strip("[]"))
            # print (snr/masked_img.sum()*masked_img.sum(), snr )

            if offset: data = _return_offseted_data(offsets, filtname, data)
            if deconvOffset:
                data = _return_offseted_data(dec_offsets,
                                             filtname,
                                             data,
                                             dec=True)

            data[segmap == 0] = 0
            zeroidx = np.where(data.ravel() != 0)[0]
            # print np.min(data), np.max(data)

            count = 0
            try:
                noise = np.ones(data.shape) * np.sqrt(data / gain + sig[i]**2)
            except RuntimeWarning:
                noise = np.ones(data.shape) * np.sqrt(sig[i]**2)
                count += 1
                # if 'B' not in filtname: print ('aperture noise less than poison noise: '+filtname)

            y, x = np.indices(noise.shape)
            savedata = np.c_[y.ravel()[zeroidx],
                             x.ravel()[zeroidx],
                             data.ravel()[zeroidx] * snr / masked_img.sum(),
                             noise.ravel()[zeroidx] * snr /
                             masked_img.sum()]  #.T

            # with open(fdir+'/vorbin_input.txt', 'w+') as datafile_id:
            if os.path.isfile(fdir + '/vorbin_input.txt'):
                os.remove(fdir + '/vorbin_input.txt')
            np.savetxt(fdir + '/vorbin_input.txt',
                       savedata)  #savedata, fmt=['%f','%f', '%f','%f'])
Example #33
import matplotlib.pyplot as plt
import numpy as np


def midpoints(x):
    sl = ()
    for i in range(x.ndim):
        x = (x[sl + np.index_exp[:-1]] + x[sl + np.index_exp[1:]]) / 2.0
        sl += np.index_exp[:]
    return x


# prepare some coordinates, and attach rgb values to each
r, g, b = np.indices((17, 17, 17)) / 16.0
rc = midpoints(r)
gc = midpoints(g)
bc = midpoints(b)

# define a sphere about [0.5, 0.5, 0.5]
sphere = (rc - 0.5)**2 + (gc - 0.5)**2 + (bc - 0.5)**2 < 0.5**2

# combine the color components
colors = np.zeros(sphere.shape + (3, ))
colors[..., 0] = rc
colors[..., 1] = gc
colors[..., 2] = bc

# and plot everything
fig = plt.figure()
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed from Matplotlib
ax.voxels(r, g, b, sphere,
          facecolors=colors,
          edgecolors=np.clip(2 * colors - 0.5, 0, 1),  # brighter outlines
          linewidth=0.5)
plt.show()
Example #34
def complex_to_rgb(z=None,
                   amin=None,
                   amax=None,
                   mode='special',
                   phstart=0.,
                   sat=1.0,
                   as_image=True):
    """\
    Complex to RGB (Red,Green,Blue) transformation.
    Attempts to image a complex array *z* in R, G, B coloration, where
    phase(z) is mapped to Hue and podulus(z) is mapped to a choice of
    perceived brightness.
    
    Parameters
    ----------
    z : array-like
        Input should be two-dimensional. If no input is given, a colorwheel
        is produced.
        
    amin : float
        All z-values with modulus below `amin` will appear black. Defaults
        to np.abs(z).min() if no value for `amin` was provided
    
    amax : float
        All z-values with modulus above `amax` will appear white. Defaults
        to 1.2*np.abs(z).max() if no value for `amax` was provided
        
    mode : str
        Choose between 'special','chroma' or 'pastell'. For 'special', 
        the individual channels will not receive gamma correction but 
        the luminance (Y-value) with the effect that the image will
        preserve the modulus when watched in grayscale. For 'chroma', 
        the chroma of the channels is distorted while preserving the 
        lightness, with the effect, that blue and red channel become 
        saturated. For 'pastell', lightness and chroma are preserved 
        by changing saturation to the appropriate value, with the effect, 
        that only pastell colors are available.
    
    phstart : float
        Starting hue for the phase. Choose in the range [0, 2*pi].
        
    sat : float
        Saturation value, defaults to 1.0 (maximum saturation when possible).
        
    as_image : bool
        See return
        
    Returns
    -------
    rgb : ndarray
        If `as_image` is True, returns 8bit array (m,n,3) where last
        axis is RGB color.
        If `as_image` is False, returns all three channels concatenated,
        RGB (3,m,n), as float values in the range [0,1.].
        
    """
    if z is None:
        x, y = np.indices((200, 200)) - 99.5
        z = x + 1j * y
        amax = 100
        amin = 0

    H = np.degrees(np.angle(z) + np.pi + phstart) % 360.
    A = np.abs(z)
    amin = A.min() if amin is None else amin
    amax = 1.2 * A.max() if amax is None else amax
    if np.allclose(amin, amax):
        amin = 0
    if np.allclose(0, amax):
        amax = 1

    A = (A - amin) / (amax - amin)
    S = sat * 100
    if str(mode) == 'special':
        XYZ = np.asarray(
            luv_to_xyz(lch_to_luv(huslp_to_lch([H, S, np.sqrt(A) * 100]))))
        R, G, B = [
            np.sum(XYZ * np.array(mi).reshape((3, ) + (XYZ.ndim - 1) * (1, )),
                   0) for mi in m
        ]
    elif str(mode) == 'chroma':
        R, G, B = husl_to_rgb(H, S, A * 100)
    else:
        R, G, B = huslp_to_rgb(H, S, A * 100)

    if as_image:
        return np.uint8(np.array([R, G, B]).swapaxes(0, 2) * 255)
    else:
        return np.array([R, G, B])
Example #35
def HANTS(sample_count,
          inputs,
          frequencies_considered_count=3,
          outliers_to_reject='Lo',
          low=0.,
          high=255,
          fit_error_tolerance=5,
          delta=0.1):
    """
    Function to apply the Harmonic analysis of time series applied to arrays

    sample_count    = nr. of images (total number of actual samples of the time series)
    base_period_len    = length of the base period, measured in virtual samples
            (days, dekads, months, etc.)
    frequencies_considered_count    = number of frequencies to be considered above the zero frequency
    inputs     = array of input sample values (e.g. NDVI values)
    ts    = array of size sample_count of time sample indicators
            (indicates virtual sample number relative to the base period);
            numbers in array ts may be greater than base_period_len
            If no aux file is used (no time samples), we assume ts(i)= i,
            where i=1, ..., sample_count
    outliers_to_reject  = 2-character string indicating rejection of high or low outliers
            select from 'Hi', 'Lo' or 'None'
    low   = valid range minimum
    high  = valid range maximum (values outside the valid range are rejected
            right away)
    fit_error_tolerance   = fit error tolerance (points deviating more than fit_error_tolerance from curve
            fit are rejected)
    dod   = degree of overdeterminedness (iteration stops if number of
            points reaches the minimum required for curve fitting, plus
            dod). This is a safety measure
    delta = small positive number (e.g. 0.1) to suppress high amplitudes
    """

    # define some parameters
    base_period_len = sample_count  #

    # check which setting to set for outlier filtering
    if outliers_to_reject == 'Hi':
        sHiLo = -1
    elif outliers_to_reject == 'Lo':
        sHiLo = 1
    else:
        sHiLo = 0

    nr = min(
        2 * frequencies_considered_count + 1,
        sample_count)  # number of 2*+1 frequencies, or number of input images

    # create empty arrays to fill
    outputs = np.zeros(shape=(inputs.shape[0], sample_count))

    mat = get_starter_matrix(base_period_len, sample_count,
                             frequencies_considered_count)

    # repeat the mat array over the number of arrays in inputs
    # and create arrays with ones with shape inputs where high and low values are set to 0
    mat = np.tile(mat[None].T, (1, inputs.shape[0])).T
    p = np.ones_like(inputs)
    p[(low >= inputs) | (inputs > high)] = 0
    nout = np.sum(p == 0, axis=-1)  # count the outliers for each timeseries

    # prepare for while loop
    ready = np.zeros((inputs.shape[0]),
                     dtype=bool)  # all timeseries set to false

    dod = 1  # (2*frequencies_considered_count-1)  # Um, no it isn't :/
    noutmax = sample_count - nr - dod
    # prepare to add delta to suppress high amplitudes but not for [0,0]
    Adelta = np.tile(np.diag(np.ones(nr))[None].T,
                     (1, inputs.shape[0])).T * delta
    Adelta[:, 0, 0] -= delta

    for _ in range(sample_count):
        if ready.all():
            break

        # multiply outliers with timeseries
        za = np.einsum('ijk,ik->ij', mat, p * inputs)
        #print za

        # multiply mat with the multiplication of multiply diagonal of p with transpose of mat
        diag = makediag3d(p)
        #print diag

        A = np.einsum('ajk,aki->aji', mat,
                      np.einsum('aij,jka->ajk', diag, mat.T))
        # add delta to suppress high amplitudes but not for [0,0]
        A += Adelta
        #A[:, 0, 0] = A[:, 0, 0] - delta
        #print A

        # solve linear matrix equation and define reconstructed timeseries
        zr = np.linalg.solve(A, za)
        #print zr

        outputs = np.einsum('ijk,kj->ki', mat.T, zr)
        #print outputs

        # calculate error and sort err by index
        err = p * (sHiLo * (outputs - inputs))
        rankVec = np.argsort(
            err,
            axis=1,
        )

        # select maximum error and compute new ready status
        maxerr = np.max(err, axis=-1)
        #maxerr = np.diag(err.take(rankVec[:, sample_count - 1], axis=-1))
        ready = (maxerr <= fit_error_tolerance) | (nout == noutmax)

        # if ready is still false
        if not ready.all():
            j = rankVec.take(sample_count - 1, axis=-1)

            p.T[j.T,
                np.indices(j.shape)] = p.T[j.T,
                                           np.indices(j.shape)] * ready.astype(
                                               int)  #*check
            nout += 1

    logging.info('function `HANTS` complete')
    return outputs
Example #36
    def predict(self, input_data, model=None):

        repetition_offsets = [np.linspace(0, self.input_patch_shape[axis] - 1, self.patch_overlaps, dtype=int) for axis in self.patch_dimensions]

        repatched_shape = list(self.output_shape)  # copy: it is mutated below, and needed even when pad_borders is False

        if self.pad_borders:
            # TODO -- Clean up this border-padding code and make it more readable.
            input_pad_dimensions = [(0, 0)] * input_data.ndim
            new_input_shape = list(input_data.shape)
            for idx, dim in enumerate(self.patch_dimensions):
                # Might not work for odd-shaped patches; check.
                input_pad_dimensions[dim] = (int(self.input_patch_shape[dim] / 2), int(self.input_patch_shape[dim] / 2))
                new_input_shape[dim] += self.input_patch_shape[dim]
            for idx, dim in enumerate(self.output_patch_dimensions):
                repatched_shape[dim] += self.input_patch_shape[dim]

            padded_input_data = np.zeros(new_input_shape)
            if self.channels_first:
                input_slice = [slice(None)] * 2 + [slice(self.input_patch_shape[dim] // 2, -(self.input_patch_shape[dim] // 2), None) for dim in self.patch_dimensions]
            else:
                input_slice = [slice(None)] + [slice(self.input_patch_shape[dim] // 2, -(self.input_patch_shape[dim] // 2), None) for dim in self.patch_dimensions] + [slice(None)]
            padded_input_data[tuple(input_slice)] = input_data
            input_data = padded_input_data

        repatched_image = np.zeros(repatched_shape)

        corner_data_dims = [input_data.shape[axis] for axis in self.patch_dimensions]
        corner_patch_dims = [self.output_patch_shape[axis] for axis in self.patch_dimensions]

        all_corners = np.indices(corner_data_dims)

        # There must be a better way to round up to an integer..
        possible_corners_slice = [slice(None)] + [slice(self.input_patch_shape[dim] // 2, -(self.input_patch_shape[dim] // 2), None) for dim in self.patch_dimensions]
        all_corners = all_corners[tuple(possible_corners_slice)]

        for rep_idx in range(self.patch_overlaps):

            if self.verbose:
                docker_print('Predicting patch set', str(rep_idx + 1) + '/' + str(self.patch_overlaps) + '...')

            corners_grid_shape = [slice(None)]
            for dim in range(all_corners.ndim - 1):
                corners_grid_shape += [slice(repetition_offsets[dim][rep_idx], corner_data_dims[dim], corner_patch_dims[dim])]

            corners_list = all_corners[tuple(corners_grid_shape)]
            corners_list = np.reshape(corners_list, (corners_list.shape[0], -1)).T

            if self.check_empty_patch:
                corners_list = self.remove_empty_patches(input_data, corners_list)

            for corner_list_idx in range(0, corners_list.shape[0], self.batch_size):

                corner_batch = corners_list[corner_list_idx:corner_list_idx + self.batch_size]
                input_patches = self.grab_patch(input_data, corner_batch)
                
                prediction = self.model.predict(input_patches)
                
                self.insert_patch(repatched_image, prediction, corner_batch)

            if rep_idx == 0:
                output_data = np.copy(repatched_image)
            else:
                output_data = output_data + (1.0 / (rep_idx + 1)) * (repatched_image - output_data)  # running average over patch sets

        if self.pad_borders:

            output_slice = [slice(None)] * output_data.ndim  # Weird
            for idx, dim in enumerate(self.output_patch_dimensions):
                # Might not work for odd-shaped patches; check.
                output_slice[dim] = slice(self.input_patch_shape[dim] // 2, -(self.input_patch_shape[dim] // 2), 1)
            output_data = output_data[tuple(output_slice)]

        return output_data
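# --- Added illustration (not from the original source): the update at the
# end of the loop above is the standard incremental mean -- after k+1 patch
# sets, mean_new = mean_old + (x - mean_old) / (k + 1). A quick check:
import numpy as np

samples = [np.random.rand(4, 4) for _ in range(5)]
mean = np.copy(samples[0])
for k, x in enumerate(samples[1:], start=1):
    mean = mean + (x - mean) / (k + 1)
assert np.allclose(mean, np.mean(samples, axis=0))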
Example #37
# create sampling point for function

# get the max
if centre == 'max':
    centre_p = np.squeeze(np.where(data == data.max()))
    # get the centroid
elif centre == 'centroid':
    centre_p = sm.center_of_mass(data)
elif type(centre) is list:
    centre_p = wcs.wcs_sky2pix([centre], 1)[0]  # centre in pixels
else:
    sys.exit("centre must be a [x, y] coordinate list, 'max' or 'centroid'")

print("Central pixel is", centre_p)
x, y = np.indices((wcs.naxis2, wcs.naxis1))
r = np.sqrt((x - centre_p[0])**2 +
            (y - centre_p[1])**2)  # radial distance of each pixel

# create ann_radii from the pixels size
assert abs(hdulist[0].header['CDELT1']) == abs(hdulist[0].header['CDELT2'])
print("Annului are", ann_separation / abs(hdulist[0].header['CDELT1']),
      'pixels wide')
ann_radii = np.arange(0, np.max(r), ann_separation /
                      abs(hdulist[0].header['CDELT1']))  # CDELT in in deg

# cycle on annuli
ann_means_val = []
ann_std_val = []
ann_means_radii = []
ann_std_radii = []
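# --- Added illustration (not from the original source): a minimal sketch of
# the annulus loop these empty lists feed into, assuming `data`, `r`, and
# `ann_radii` are defined as above.
for r_in, r_out in zip(ann_radii[:-1], ann_radii[1:]):
    in_ann = (r >= r_in) & (r < r_out)
    ann_means_val.append(np.mean(data[in_ann]))
    ann_std_val.append(np.std(data[in_ann]))
    ann_means_radii.append(0.5 * (r_in + r_out))
    ann_std_radii.append(0.5 * (r_out - r_in))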
Example #38
def main(overide={}):
    # get command line args and config
    sc = 'make_pixel_map_poly'

    # search the current directory for *.ini files if not present in cxi directory
    config_dirs = [os.path.split(os.path.abspath(__file__))[0]]

    # extract the first paragraph from the doc string
    #des = st.generate_pixel_map.__doc__.split('\n\n')[0]
    des = ""

    # now load the necessary data
    args, params = cmdline_config_cxi_reader.get_all(sc,
                                                     des,
                                                     config_dirs=config_dirs,
                                                     roi=True)
    params = params[sc]

    # override with input params (if any)
    params.update(overide)

    # evaluate the polynomial expression for the pixel map
    y, x = np.indices(params['whitefield'].shape)
    ufs = eval(params['pixel_map_fs'])
    uss = eval(params['pixel_map_ss'])
    u = np.array([uss, ufs])
    u = np.clip(u, -1000, 1000)

    # generate the pixel translations
    M = params['z'] / params['defocus']
    dfs = params['x_pixel_size'] / M
    dss = params['y_pixel_size'] / M

    pixel_translations = st.make_pixel_translations(params['translations'],
                                                    params['basis'], dss, dfs)

    O, n0, m0 = st.make_object_map(
        params['data'].astype(params['whitefield'].dtype), params['mask'],
        params['whitefield'], pixel_translations,
        u.astype(params['whitefield'].dtype), params['ls'])

    u0 = np.array(np.indices(params['mask'].shape))
    du = u - u0

    out = {
        'reference_image': O,
        'n0': n0,
        'm0': m0,
        'pixel_map': u,
        'pixel_map_residual': du,
        'pixel_translations': pixel_translations,
        'dfs': dfs,
        'dss': dss
    }
    cmdline_config_cxi_reader.write_all(params,
                                        args.filename,
                                        out,
                                        apply_roi=True)

    # output display for gui
    with open('.log', 'w') as f:
        print('display: ' + params['h5_group'] + '/reference_image', file=f)
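# --- Added illustration (not from the original source): the eval'd pixel-map
# expressions above are plain strings evaluated against the y, x index grids.
# The polynomial below is a hypothetical example, not the project's default.
import numpy as np

y, x = np.indices((4, 6)).astype(float)
ufs = eval("x + 1e-4 * (x**2 + y**2)")   # distorted fast-scan coordinates
uss = eval("y")                          # undistorted slow-scan coordinates
print(np.array([uss, ufs]).shape)        # (2, 4, 6)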
Example #39
    def process_Multiple(self, img, g_sublist, mask, src_index, isrc, subim, isl, delc, subn, subm):
        """ Same as gaul_to_source.f. isrc is same as k in the fortran version. """
        from math import pi, sqrt
        from .const import fwsig
        from scipy import ndimage
        from . import functions as func

        mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Gaul2Srl  ")
        dum = img.beam[0]*img.beam[1]
        cdeltsq = img.wcs_obj.acdelt[0]*img.wcs_obj.acdelt[1]
        bmar_p = 2.0*pi*dum/(cdeltsq*fwsig*fwsig)

                                        # try
        subim_src = self.make_subim(subn, subm, g_sublist, delc)
        mompara = func.momanalmask_gaus(subim_src, mask, isrc, bmar_p, True)
                                        # initial peak posn and value
        maxv = N.max(subim_src)
        maxx, maxy = N.unravel_index(N.argmax(subim_src), subim_src.shape)
                                        # fit gaussian around this posn
        blc = N.zeros(2, dtype=int); trc = N.zeros(2, dtype=int)
        n, m = subim_src.shape[0:2]
        bm_pix = N.array([img.pixel_beam()[0]*fwsig, img.pixel_beam()[1]*fwsig, img.pixel_beam()[2]])
        ssubimsize = max(int(N.round(N.max(bm_pix[0:2]) * 2)) + 1, 5)
        blc[0] = max(0, maxx-(ssubimsize-1)/2); blc[1] = max(0, maxy-(ssubimsize-1)/2)
        trc[0] = min(n, maxx+(ssubimsize-1)/2); trc[1] = min(m, maxy+(ssubimsize-1)/2)
        s_imsize = trc - blc + 1

        p_ini = [maxv, (s_imsize[0]-1)/2.0*1.1, (s_imsize[1]-1)/2.0*1.1, bm_pix[0]/fwsig*1.3, \
                 bm_pix[1]/fwsig*1.1, bm_pix[2]*2]
        data = subim_src[blc[0]:blc[0]+s_imsize[0], blc[1]:blc[1]+s_imsize[1]]
        smask = mask[blc[0]:blc[0]+s_imsize[0], blc[1]:blc[1]+s_imsize[1]]
        rmask = N.where(smask==isrc, False, True)
        x_ax, y_ax = N.indices(data.shape)

        if N.sum(~rmask) >=6:
          para, ierr = func.fit_gaus2d(data, p_ini, x_ax, y_ax, rmask)
          if (0.0<para[1]<s_imsize[0]) and (0.0<para[2]<s_imsize[1]) and \
            para[3]<s_imsize[0] and para[4]<s_imsize[1]:
            maxpeak = para[0]
          else:
            maxpeak = maxv
          posn = para[1:3]-(0.5*N.sum(s_imsize)-1)/2.0+N.array([maxx, maxy])-1+delc
        else:
          maxpeak = maxv
          posn = N.unravel_index(N.argmax(data*~rmask), data.shape)+N.array(delc) +blc

        # calculate peak by bilinear interpolation around centroid
        # First check that moment analysis gave a valid position. If not, use
        # posn from gaussian fit instead.
        if N.isnan(mompara[1]):
            mompara[1] = posn[0] - delc[0]
        x1 = int(N.floor(mompara[1]))
        if N.isnan(mompara[2]):
            mompara[2] = posn[1] - delc[1]
        y1 = int(N.floor(mompara[2]))
        xind = slice(x1, x1+2, 1); yind = slice(y1, y1+2, 1)
        if img.opts.flag_smallsrc and (N.sum(mask[xind, yind]==N.ones((2,2))*isrc) != 4):
            mylog.debug('Island = '+str(isl.island_id))
            mylog.debug('Mask = '+repr(mask[xind, yind])+'xind, yind, x1, y1 = '+repr(xind)+' '+repr(yind)+' '+repr(x1)+' '+repr(y1))
        t = (mompara[1] - x1) / (x1 + 1 - x1)  # in case you change it later
        u = (mompara[2] - y1) / (y1 + 1 - y1)
        try:
            s_peak=((1.0-t)*(1.0-u)*subim_src[x1,y1]+
                    t*(1.0-u)*subim_src[x1+1,y1]+
                    t*u*subim_src[x1+1,y1+1]+
                    (1.0-t)*u*subim_src[x1,y1+1])
        except IndexError:
            # interpolation failed because source is too small
            # probably pathological, take a guess..
            s_peak=subim_src[x1,y1]
        if (not img.opts.flag_smallsrc) and (N.sum(mask[xind, yind]==N.ones((2,2))*isrc) != 4):
            mylog.debug('Speak '+repr(s_peak)+'Mompara = '+repr(mompara))
            mylog.debug('x1, y1 : '+repr(x1)+', '+repr(y1))

        # Don't let s_peak fall too far below the normalized peak (this can
        # happen when, e.g., the centroid falls outside of the source)
        norm_peak = mompara[0]*bmar_p/(mompara[3]*mompara[4])
        if s_peak < norm_peak/2.0:
            s_peak = norm_peak/2.0

        # convert pixels to coords
        try:
            sra, sdec = img.pix2sky([mompara[1]+delc[0], mompara[2]+delc[1]])
            mra, mdec = img.pix2sky(posn)
        except RuntimeError as err:
            # Invalid pixel wcs coordinate
            sra, sdec = 0.0, 0.0
            mra, mdec = 0.0, 0.0

        # "deconvolve" the sizes
        gaus_c = [mompara[3], mompara[4], mompara[5]]
        gaus_bm = [bm_pix[0], bm_pix[1], bm_pix[2]]
        gaus_dc, err = func.deconv2(gaus_bm, gaus_c)
        deconv_size_sky = img.pix2gaus(gaus_dc, [mompara[1]+delc[0], mompara[2]+delc[1]])
        deconv_size_sky_uncorr = img.pix2gaus(gaus_dc, [mompara[1]+delc[0], mompara[2]+delc[1]], use_wcs=False)

        # update all objects etc
        tot = 0.0
        totE_sq = 0.0
        for g in g_sublist:
            tot += g.total_flux
            totE_sq += g.total_fluxE**2
        totE = sqrt(totE_sq)
        size_pix = [mompara[3], mompara[4], mompara[5]]
        size_sky = img.pix2gaus(size_pix, [mompara[1]+delc[0], mompara[2]+delc[1]])
        size_sky_uncorr = img.pix2gaus(size_pix, [mompara[1]+delc[0], mompara[2]+delc[1]], use_wcs=False)

        # Estimate uncertainties in source size and position due to
        # errors in the constituent Gaussians using a Monte Carlo technique.
        # Sum with Condon (1997) errors in quadrature.
        plist = mompara.tolist()+[tot]
        plist[0] = s_peak
        plist[3] /= fwsig
        plist[4] /= fwsig
        errors = func.get_errors(img, plist, isl.rms)

        if img.opts.do_mc_errors:
            nMC = 20
            mompara0_MC = N.zeros(nMC, dtype=N.float32)
            mompara1_MC = N.zeros(nMC, dtype=N.float32)
            mompara2_MC = N.zeros(nMC, dtype=N.float32)
            mompara3_MC = N.zeros(nMC, dtype=N.float32)
            mompara4_MC = N.zeros(nMC, dtype=N.float32)
            mompara5_MC = N.zeros(nMC, dtype=N.float32)
            for i in range(nMC):
                # Reconstruct source from component Gaussians. Draw the Gaussian
                # parameters from random distributions given by their errors.
                subim_src_MC = self.make_subim(subn, subm, g_sublist, delc, mc=True)

                try:
                    mompara_MC = func.momanalmask_gaus(subim_src_MC, mask, isrc, bmar_p, True)
                    mompara0_MC[i] = mompara_MC[0]
                    mompara1_MC[i] = mompara_MC[1]
                    mompara2_MC[i] = mompara_MC[2]
                    mompara3_MC[i] = mompara_MC[3]
                    mompara4_MC[i] = mompara_MC[4]
                    mompara5_MC[i] = mompara_MC[5]
                except Exception:
                    mompara0_MC[i] = mompara[0]
                    mompara1_MC[i] = mompara[1]
                    mompara2_MC[i] = mompara[2]
                    mompara3_MC[i] = mompara[3]
                    mompara4_MC[i] = mompara[4]
                    mompara5_MC[i] = mompara[5]
            mompara0E = N.std(mompara0_MC)
            mompara1E = N.std(mompara1_MC)
            if mompara1E > 2.0*mompara[1]:
                mompara1E = 2.0*mompara[1] # Don't let errors get too large
            mompara2E = N.std(mompara2_MC)
            if mompara2E > 2.0*mompara[2]:
                mompara2E = 2.0*mompara[2] # Don't let errors get too large
            mompara3E = N.std(mompara3_MC)
            if mompara3E > 2.0*mompara[3]:
                mompara3E = 2.0*mompara[3] # Don't let errors get too large
            mompara4E = N.std(mompara4_MC)
            if mompara4E > 2.0*mompara[4]:
                mompara4E = 2.0*mompara[4] # Don't let errors get too large
            mompara5E = N.std(mompara5_MC)
            if mompara5E > 2.0*mompara[5]:
                mompara5E = 2.0*mompara[5] # Don't let errors get too large
        else:
            mompara1E = 0.0
            mompara2E = 0.0
            mompara3E = 0.0
            mompara4E = 0.0
            mompara5E = 0.0

        # Now add MC errors in quadrature with Condon (1997) errors
        size_skyE = [sqrt(mompara3E**2 + errors[3]**2) * sqrt(cdeltsq),
                     sqrt(mompara4E**2 + errors[4]**2) * sqrt(cdeltsq),
                     sqrt(mompara5E**2 + errors[5]**2)]
        sraE, sdecE = (sqrt(mompara1E**2 + errors[1]**2) * sqrt(cdeltsq),
                       sqrt(mompara2E**2 + errors[2]**2) * sqrt(cdeltsq))
        deconv_size_skyE = size_skyE # set deconvolved errors to non-deconvolved ones

        # Find aperture flux
        if img.opts.aperture_posn == 'centroid':
            aper_pos = [mompara[1]+delc[0], mompara[2]+delc[1]]
        else:
            aper_pos = posn
        aper_flux, aper_fluxE = func.ch0_aperture_flux(img, aper_pos, img.aperture)

        isl_id = isl.island_id
        source_prop = list(['M', [tot, totE], [s_peak, isl.rms], [maxpeak, isl.rms],
                      [aper_flux, aper_fluxE], [[sra, sdec],
                      [sraE, sdecE]], [[mra, mdec], [sraE, sdecE]], [size_sky, size_skyE], [size_sky_uncorr, size_skyE],
                      [deconv_size_sky, deconv_size_skyE], [deconv_size_sky_uncorr, deconv_size_skyE], isl.bbox, len(g_sublist),
                      isl_id, g_sublist])
        source = Source(img, source_prop)

        src_index += 1
        for g in g_sublist:
            g.source_id = src_index
            g.code = 'M'
        source.source_id = src_index

        return src_index, source
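# --- Added illustration (not from the original source): the s_peak
# expression above is plain bilinear interpolation on the 2x2 neighbourhood
# subim_src[x1:x1+2, y1:y1+2]; `sub`, `t`, and `u` below are stand-ins.
import numpy as np

sub = np.arange(4.0).reshape(2, 2)   # 2x2 pixel neighbourhood
t, u = 0.3, 0.6                      # fractional offsets inside the pixel
s_peak = ((1 - t) * (1 - u) * sub[0, 0] + t * (1 - u) * sub[1, 0]
          + t * u * sub[1, 1] + (1 - t) * u * sub[0, 1])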
Example #40
# NOTE: the opening lines of this snippet were missing; the loop header below
# is a minimal reconstruction assuming an open h5py File named `h5_file`.
for key in h5_file.keys():
    print('shape of {} is {}'.format(key, h5_file[key].shape))

spectra_list = []
vn_coeff_list = []
for i in range(100, 110, 1):
    spect = h5_file['Spectra16'][i, :, :]
    spectra_list.append(spect[np.newaxis, ...])
    vn_coeff_list.append(h5_file['VN_coeff'][i, :])

spectra = np.concatenate(spectra_list, axis=0)

fig, ax = plt.subplots(nrows=int(spectra.shape[0]) * 2,
                       ncols=1,
                       figsize=(22, 17),
                       sharex=True)
grid = np.indices(dimensions=(int(spectra.shape[0]), 1))
row = grid[0].flatten() * 2
col = grid[1].flatten()
index = np.arange(spectra.shape[0])
max_y = np.max(spectra)
for ind, ro, co in zip(index, row, col):
    for sp in spectra[ind]:
        ax[ro].plot(sp)

    ax[ro].set_ylim([0, max_y])
    if ind == spectra.shape[0] - 1:
        ax[ro].set_xlabel('electron energy [eV]')

    ax[ro + 1].plot(np.real(vn_coeff_list[ind]))
    ax[ro + 1].plot(np.imag(vn_coeff_list[ind]))
Example #41
def prepare_a_matrix_of_pattern_points(grid_size, grid_intersection_size):
    pattern_points = np.zeros((np.prod(grid_intersection_size), 3), np.float32)
    pattern_points[:, :2] = np.indices(grid_intersection_size).T.reshape(-1, 2)
    pattern_points *= grid_size
    return pattern_points
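# --- Added usage example (hypothetical values, not from the original
# source): a 9x6 chessboard with 25 mm squares gives one (x, y, 0) point per
# inner corner.
pts = prepare_a_matrix_of_pattern_points(25.0, (9, 6))
print(pts.shape)   # (54, 3); the z column stays zero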
Example #42
def th_iterproduct(*args):
    return th.from_numpy(np.indices(args).reshape((len(args), -1)).T)
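# --- Added usage example (not from the original source; assumes the
# snippet's `import torch as th` and `import numpy as np`):
coords = th_iterproduct(2, 3)
print(coords)   # every (i, j) pair of a 2x3 grid, shape (6, 2)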
Example #43
def Rivers_General(Name_NC_DEM, Name_NC_DEM_Dir, Name_NC_Acc_Pixels, Name_NC_Rivers, Reference_data):

    import numpy as np
    from wa.General import raster_conversions as RC
    
    ############################### Open needed dataset ###########################
    
    # Extract flow direction data from NetCDF file
    flow_directions = RC.Open_nc_array(Name_NC_DEM_Dir)
     
    # Extract Rivers data from NetCDF file
    Rivers = RC.Open_nc_array(Name_NC_Rivers)

    # Extract DEM data from NetCDF file
    DEM = RC.Open_nc_array(Name_NC_DEM) 

    # Extract Accumulated pixels data from NetCDF file
    Accumulated_Pixels = RC.Open_nc_array(Name_NC_Acc_Pixels) 
			
    ############################### Create river tree #############################
        
    # Get the raster shape	
    size_Y, size_X = np.shape(flow_directions)
    
    # Create a river array with a boundary of 1 pixel
    Rivers_bounds = np.zeros([size_Y+2, size_X+2])
    Rivers_bounds[1:-1,1:-1] = Rivers	
    
    # Create a flow direction array with a boundary of 1 pixel
    flow_directions[flow_directions==0]=-32768
    flow_directions_bound = np.ones([size_Y+2, size_X+2]) * -32768
    flow_directions_bound[1:-1,1:-1] = flow_directions
    
    # Create ID Matrix
    y,x = np.indices((size_Y, size_X))
    ID_Matrix = np.int32(np.ravel_multi_index(np.vstack((y.ravel(),x.ravel())),(size_Y,size_X),mode='clip').reshape(x.shape))
    ID_Matrix_bound = np.ones([size_Y+2, size_X+2]) * -32768
    ID_Matrix_bound[1:-1,1:-1] = ID_Matrix + 1
    ID_Matrix_bound[flow_directions_bound==-32768]=-32768
    del  x, y
    
    # Empty total from and to arrays
    ID_to_total=np.array([])
    ID_from_total=np.array([])
    
    # The flow directions parameters of HydroSHED
    Directions = [1, 2, 4, 8, 16, 32, 64, 128]
    
    # Loop over the directions			
    for Direction in Directions:
    
        # empty from and to arrays for 1 direction				
        data_flow_to = np.zeros([size_Y + 2, size_X + 2])
        data_flow_from = np.zeros([size_Y + 2, size_X + 2])
    				
        # Get the ID of only the rivers
        data_flow_to_ID = np.zeros([size_Y + 2, size_X + 2])			
        data_flow_in = np.ones([size_Y + 2, size_X + 2])	* Rivers_bounds	
    				
        # Mask only one direction				
        data_flow_from[flow_directions_bound == Direction] = data_flow_in[flow_directions_bound == Direction] * ID_Matrix_bound[flow_directions_bound == Direction]
    
        # Add the data flow to ID  
        if Direction == 4:
            data_flow_to[1:,:] = data_flow_from[:-1,:]
        if Direction == 2:
            data_flow_to[1:,1:] = data_flow_from[:-1,:-1]
        if Direction == 1:
            data_flow_to[:,1:] = data_flow_from[:,:-1]
        if Direction == 128:
            data_flow_to[:-1,1:] = data_flow_from[1:,:-1]
        if Direction == 64:
            data_flow_to[:-1,:] = data_flow_from[1:,:]
        if Direction == 32:
            data_flow_to[:-1,:-1] = data_flow_from[1:,1:]
        if Direction == 16:
            data_flow_to[:,:-1] = data_flow_from[:,1:]
        if Direction == 8:
            data_flow_to[1:,:-1] = data_flow_from[:-1,1:]
    
        # mask out the no river pixels
        data_flow_to_ID[data_flow_to>0] = ID_Matrix_bound[data_flow_to>0]
    
        # Collect to and from arrays
        ID_from_total = np.append(ID_from_total,data_flow_from[data_flow_from!=0].ravel())		
        ID_to_total = np.append(ID_to_total,data_flow_to_ID[data_flow_to_ID!=0].ravel())				
    
    
    ######################## Define the starting point ############################
    
    # Define starting point
    Max_Acc_Pix = np.nanmax(Accumulated_Pixels[ID_Matrix_bound[1:-1,1:-1]>0])
    nrow, ncol = np.argwhere(Accumulated_Pixels == Max_Acc_Pix)[0]  # argwhere gives (row, col)

    # Add Bounds
    row = nrow + 1
    col = ncol + 1
    
    ############################ Route the river ##################################
    
    # Get the ID of the starting point
    ID_starts = [ID_Matrix_bound[row, col]]
    
    # Create an empty dictionary for the rivers
    River_dict = dict()
    
    # Create empty array for the loop
    ID_starts_next = []	
    i = 0  
    
    # Keep going on till all the branches are looped
    while len(ID_starts) > 0:
        for ID_start in ID_starts:
            ID_start = int(ID_start)
    								
            # Empty parameters for new starting point								
            new = 0
            IDs = []	
    								
            # Add starting point								
            Arrays_from = np.argwhere(ID_from_total[:] == ID_start)             
            ID_from = ID_to_total[int(Arrays_from[0])]							
            IDs = [ID_from, ID_start]
            ID_start_now = ID_start	
    
            # Keep going till the branch ends								
            while new == 0:					
     
                Arrays_to = np.argwhere(ID_to_total[:] == ID_start)
    
                # Add IDs to the river dictionary
                if len(Arrays_to)>1 or len(Arrays_to) == 0:
                    River_dict[i] = IDs
                    i += 1	
                    new = 1
     
                    # Define the next loop for the new branches             											
                    for j in range(0, len(Arrays_to)):													
                        ID_starts_next = np.append(ID_starts_next,ID_from_total[int(Arrays_to[j])])												
    
                    # If it was the last one then empty ID_start_next                               																								
                    if ID_start_now == ID_starts[-1]:
                        ID_starts = ID_starts_next
                        ID_starts_next = []	
    
                # Add pixel to tree for river dictionary						
                else:
                    ID_start = ID_from_total[Arrays_to[0]]						
                    IDs = np.append(IDs, ID_start)									
         
									
    ######################## Create dict distance and dict dem ####################							
    # Get raster information 
    geo_out, proj, size_X, size_Y = RC.Open_array_info(Reference_data)
    # Get the distance of a horizontal and vertical flow pixel (assuming it flows in a straight line)
    import wa.Functions.Start.Area_converter as AC
    vertical, horizontal = AC.Calc_dlat_dlon(geo_out,size_X, size_Y)
    
    # Calculate a diagonal flowing pixel (assuming it flows in a straight line)
    diagonal = np.power((np.square(vertical) + np.square(horizontal)),0.5)
    
    # Create empty distance array
    Distance = np.zeros([size_Y, size_X])
    
    # Fill in the distance array
    Distance[np.logical_or(flow_directions == 1,flow_directions == 16)] = horizontal[np.logical_or(flow_directions == 1,flow_directions == 16)]
    Distance[np.logical_or(flow_directions == 64,flow_directions == 4)] = vertical[np.logical_or(flow_directions == 64,flow_directions == 4)]
    Distance[np.logical_or(np.logical_or(np.logical_or(flow_directions == 32,flow_directions == 8),flow_directions == 128),flow_directions == 2)] = diagonal[np.logical_or(np.logical_or(np.logical_or(flow_directions == 32,flow_directions == 8),flow_directions == 128),flow_directions == 2)]						
    
    # Create empty dictionaries for discharge, distance, and DEM
    Discharge_dict = dict()
    Distance_dict = dict()
    DEM_dict = dict()
        
    # Create empty arrays needed for the loop
    River_end = []
    River_ends = np.zeros([2,3])
    
    # Loop over the branches
    for River_number in range(0,len(River_dict)):
	    
        # Get the pixels associated with the river section    
        River = River_dict[River_number]
        i=1	
	    			
        # Create empty arrays				
        Distances_river = np.zeros([len(River)])
        DEM_river = np.zeros([len(River)])	
        Discharge_river = np.zeros([len(River)])	
        
        # for the first pixel get the previous pixel value from another branch
        row_start = np.argwhere(River_ends[:,0] == River[0])	
        if len(row_start) < 1:			
            Distances_river[0] = 0
            row, col = np.argwhere(ID_Matrix_bound == River[0])[0][:]
            DEM_river[0] = DEM[row - 1, col - 1] 
            Discharge_river[0] = -9999        
					
        else:
            Distances_river[0] = River_ends[row_start, 1]
            DEM_river[0] = River_ends[row_start, 2]
            row, col = np.argwhere(ID_Matrix_bound == River[0])[0][:]
            #Discharge_river[0] = Routed_Discharge[timestep, row - 1, col - 1] 	

        # For the other pixels get the value of the River ID pixel								
        for River_part in River[1:]:
            row, col = np.argwhere(ID_Matrix_bound == River_part)[0][:]
            Distances_river[i] = Distance[row - 1, col - 1]						
            DEM_river[i] = np.max([DEM_river[i-1],DEM[row - 1, col - 1]])  
            #Discharge_river[i] = Routed_Discharge[timestep, row - 1, col - 1]								
    
            if River_part == River[1] and Discharge_river[i-1] == -9999:
                Discharge_river[i - 1] = Discharge_river[i]         
  													
            i += 1

        # Write array in dictionary													
        DEM_dict[River_number] = DEM_river
        Discharge_dict[River_number] = Discharge_river 
        Distance_dict[River_number] = np.cumsum(Distances_river)
				
        # Save the last pixel value				
        River_end[:] = [River_part , np.cumsum(Distances_river)[-1], DEM_river[-1]]								
        River_ends = np.vstack((River_ends, River_end))	

    return River_dict, DEM_dict, Distance_dict
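# --- Added illustration (not from the original source): the ID matrix built
# above is just the row-major linear index of each pixel (plus 1 inside the
# padded bounds).
import numpy as np

size_Y, size_X = 3, 4
y, x = np.indices((size_Y, size_X))
ids = np.ravel_multi_index(np.vstack((y.ravel(), x.ravel())),
                           (size_Y, size_X)).reshape(x.shape)
assert (ids == np.arange(size_Y * size_X).reshape(size_Y, size_X)).all()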
Example #44
def segmentation_toy():
    """
    ===========================================
    Spectral clustering for image segmentation
    ===========================================

    In this example, an image with connected circles is generated and
    spectral clustering is used to separate the circles.

    In these settings, the :ref:`spectral_clustering` approach solves the problem
    known as 'normalized graph cuts': the image is seen as a graph of
    connected voxels, and the spectral clustering algorithm amounts to
    choosing graph cuts defining regions while minimizing the ratio of the
    gradient along the cut, and the volume of the region.

    As the algorithm tries to balance the volume (i.e., balance the region
    sizes), if we take circles with different sizes, the segmentation fails.

    In addition, as there is no useful information in the intensity of the image,
    or its gradient, we choose to perform the spectral clustering on a graph
    that is only weakly informed by the gradient. This is close to performing
    a Voronoi partition of the graph.

    In addition, we use the mask of the objects to restrict the graph to the
    outline of the objects. In this example, we are interested in
    separating the objects one from the other, and not from the background.
    """
    from sklearn.cluster import spectral_clustering
    from sklearn.feature_extraction import image
    import matplotlib.pyplot as plt
    import numpy as np
    np.random.seed(0)
    print(__doc__)

    # Authors:  Emmanuelle Gouillart <*****@*****.**>
    #           Gael Varoquaux <*****@*****.**>
    # License: BSD 3 clause

    l = 100
    x, y = np.indices((l, l))

    center1 = (28, 24)
    center2 = (40, 50)
    center3 = (67, 58)
    center4 = (24, 70)

    radius1, radius2, radius3, radius4 = 16, 14, 15, 14

    circle1 = (x - center1[0])**2 + (y - center1[1])**2 < radius1**2
    circle2 = (x - center2[0])**2 + (y - center2[1])**2 < radius2**2
    circle3 = (x - center3[0])**2 + (y - center3[1])**2 < radius3**2
    circle4 = (x - center4[0])**2 + (y - center4[1])**2 < radius4**2

    # #############################################################################
    # 4 circles
    img = circle1 + circle2 + circle3 + circle4

    # We use a mask that limits to the foreground: the problem that we are
    # interested in here is not separating the objects from the background,
    # but separating them one from the other.
    mask = img.astype(bool)

    img = img.astype(float)
    img += 1 + 0.2 * np.random.randn(*img.shape)

    # Convert the image into a graph with the value of the gradient on the
    # edges.
    graph = image.img_to_graph(img, mask=mask)

    # Take a decreasing function of the gradient: making it only weakly
    # dependent on the gradient keeps the segmentation close to a Voronoi
    # partition of the image.
    graph.data = np.exp(-graph.data / graph.data.std())

    # Force the solver to be arpack, since amg is numerically
    # unstable on this example
    labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
    label_im = np.full(mask.shape, -1.)
    label_im[mask] = labels

    plt.matshow(img)
    plt.matshow(label_im)

    # #############################################################################
    # 2 circles
    img = circle1 + circle2
    mask = img.astype(bool)
    img = img.astype(float)

    img += 1 + 0.2 * np.random.randn(*img.shape)

    graph = image.img_to_graph(img, mask=mask)
    graph.data = np.exp(-graph.data / graph.data.std())

    labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
    label_im = np.full(mask.shape, -1.)
    label_im[mask] = labels

    plt.matshow(img)
    plt.matshow(label_im)
Example #45
def fake_image(n_sources=100,
               shape=[512, 512],
               amplitude_r=[0, 20000],
               std_dev=[0, 7],
               random_state=666,
               noise={
                   'type': None,
                   'mean': None,
                   'stddev': None
               }):
    """Creates fake image with Gaussian sources.

    Creates a fake image with gaussian sources, whose parameters can be
    adjusted based on the following arguments.

    A background with spatial fluctuations at various scales is
    created separately. This is achieved by first creating the desired 2D power
    spectrum, which is a radial power law in Fourier space. According to the
    literature, the index of the ISM power-law distribution is -2.9. Taking the
    inverse FFT of this p_law array gives a background with the desired levels
    of spatial fluctuations in real space.

        Args:
            n_sources(int):       Number of sources
            shape(2-tuple):       Dimensions of the image
            amplitude_r(list):    Range of amplitudes of sources
            std_dev(list):        Range of standard deviations of sources
            random_state(int):    Seed for random number generator
            noise(dictionary):    Parameters for noise
                (i)   type:       Gaussian or Poisson
                (ii)  mean:       Mean value of noise
                (iii) stddev:     Standard deviation of gaussian noise
    """

    param_ranges = [('amplitude', [amplitude_r[0], amplitude_r[1]]),
                    ('x_mean', [0, shape[1]]), ('y_mean', [0, shape[0]]),
                    ('x_stddev', [std_dev[0], std_dev[1]]),
                    ('y_stddev', [std_dev[0], std_dev[1]]),
                    ('theta', [0, np.pi])]
    param_ranges = OrderedDict(param_ranges)
    sources = make_random_models_table(n_sources,
                                       param_ranges,
                                       random_state=random_state)

    if noise['type'] is None:
        sources = make_gaussian_sources_image(shape, sources)
    else:
        # render the sources first, then add the requested noise image
        sources = make_gaussian_sources_image(shape, sources) + \
            make_noise_image(shape,
                             type=noise['type'],
                             mean=noise['mean'],
                             stddev=noise['stddev'])

    # CREATING BACKGROUND (ISM)
    # The objective is to create a background with different levels of
    # spatial fluctuations built in. This is achieved by first creating the
    # desired 2D power spectrum, which is a radial power law in Fourier space.
    # According to the literature, the index of the ISM power-law distribution
    # is -2.9. Taking the inverse FFT of this p_law array gives a background
    # with the desired levels of spatial fluctuations in real space.

    p_law = np.zeros(shape, dtype=float)
    y, x = np.indices(p_law.shape)
    center = np.array([(y.max() - y.min()) / 2.0, (x.max() - x.min()) / 2.0])
    r = np.hypot(x - center[1], y - center[0])

    r_ind = r.astype(int)
    r_max = r.max().astype(int)

    a = np.arange(0.1, r_max + 1.1, 1)  # These values control size of clouds
    b = 10**11 * a**(-2.9)  # This controls magnitude of background

    for i in range(0, r_max + 1):
        p_law[r_ind > i] = b[i]

    magnitude = np.sqrt(p_law)
    phase = 2 * np.pi * np.random.randn(shape[0], shape[1])
    FFT = magnitude * np.exp(1j * phase)
    background = np.abs((fftpack.ifft2(fftpack.fftshift(FFT))))

    sim_sky = sources + background

    return sources, background, sim_sky
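# --- Added illustration (not from the original source): a self-contained
# sketch of the power-law background trick used above -- a radial power law
# in Fourier space with random phases, inverse-FFT'd back to real space.
import numpy as np
from scipy import fftpack

shape = (64, 64)
y, x = np.indices(shape)
r = np.hypot(x - shape[1] / 2.0, y - shape[0] / 2.0)
magnitude = np.sqrt(10**11 * np.maximum(r, 0.1)**(-2.9))  # clamp r=0
phase = 2 * np.pi * np.random.randn(*shape)
background = np.abs(fftpack.ifft2(fftpack.fftshift(magnitude * np.exp(1j * phase))))
print(background.shape)   # (64, 64)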
Example #46
    def triangular_blockwise_assign(self, A_qnn, A_NN, band_rank):
        """Assign the sub-blocks pertaining from a given rank to the lower
        triangular part of a Hermitian matrix A_NN. This subroutine is used
        for matrix assembly.

        Parameters:

        A_qnn: ndarray
            Sub-blocks belonging to the specified rank.
        A_NN: ndarray
            Full matrix in which to write contributions from sub-blocks.
        band_rank: int
            Communicator rank to which the sub-blocks belongs.

        Note that a Hermitian matrix requires Q=B//2+1 blocks of M x M
        elements where B is the communicator size and M=N//B for N bands.
        """
        N = self.bd.mynbands
        B = self.bd.comm.size
        assert band_rank in range(B)

        if B == 1:
            # Only fill in the lower part
            mask = np.tri(N).astype(bool)
            A_NN[mask] = A_qnn.reshape((N, N))[mask]
            return

        # A_qnn[q2,myn1,myn2] on rank q1 is the q2'th overlap calculated
        # between <psi_n1| and A|psit_n2> where n1 <-> (q1,myn1) and
        # n2 <-> ((q1+q2)%B,myn2) since we've sent/received q2 times.
        q1 = band_rank
        Q = B // 2 + 1
        if debug:
            assert A_qnn.shape == (Q, N, N)

        # Note that for integer inequalities, these relations are useful (X>0):
        #     A*X > B   <=>   A > B//X   ^   A*X <= B   <=>   A <= B//X

        if self.bd.strided:
            A_nbnb = A_NN.reshape((N, B, N, B))
            mask = np.empty((N, N), dtype=bool)
            for q2 in range(Q):
                # n1 = (q1+q2)%B + myn1*B   ^   n2 = q1 + myn2*B
                #
                # We seek the lower triangular part i.e. n1 >= n2
                #   <=>   (myn2-myn1)*B <= (q1+q2)%B-q1
                #   <=>   myn2-myn1 <= dq//B
                dq = (q1 + q2) % B - q1  # within ]-B; Q[ so dq//B is -1 or 0

                # Create mask for lower part of current block
                mask[:] = np.tri(N, N, dq // B)
                if debug:
                    m1, m2 = np.indices((N, N))
                    assert dq in range(-B + 1, Q)
                    assert (mask == (m1 >= m2 - dq // B)).all()

                # Copy lower part of A_qnn[q2] to its rightfull place
                A_nbnb[:, (q1 + q2) % B, :, q1][mask] = A_qnn[q2][mask]

                # Negate the transposed mask to get complementary mask
                mask = ~mask.T

                # Copy upper part of Hermitian conjugate of A_qnn[q2]
                A_nbnb[:, q1, :,
                       (q1 + q2) % B][mask] = A_qnn[q2].T.conj()[mask]
        else:
            A_bnbn = A_NN.reshape((B, N, B, N))

            # Optimization for the first block
            if q1 == 0:
                A_bnbn[:Q, :, 0] = A_qnn
                return

            for q2 in range(Q):
                # n1 = ((q1+q2)%B)*N + myn1   ^   n2 = q1*N + myn2
                #
                # We seek the lower triangular part i.e. n1 >= n2
                #   <=>   ((q1+q2)%B-q1)*N >= myn2-myn1
                #   <=>   myn2-myn1 <= dq*N
                #   <=>   entire block if dq > 0,
                #   ...   myn2 <= myn1 if dq == 0,
                #   ...   copy nothing if dq < 0
                if q1 + q2 < B:
                    A_bnbn[q1 + q2, :, q1] = A_qnn[q2]
                else:
                    A_bnbn[q1, :, q1 + q2 - B] = A_qnn[q2].T.conj()
Example #47
    def get_kernel_matrix(quantum_instance, feature_map, x1_vec, x2_vec=None):
        """
        Construct kernel matrix, if x2_vec is None, self-innerproduct is conducted.

        Notes:
            When using `statevector_simulator`,
            we only build the circuits for Psi(x1)|0> rather than
            Psi(x2)^dagger Psi(x1)|0>, and then we perform the inner product classically.
            That is, for `statevector_simulator`,
            the total number of circuits will be O(N) rather than
            O(N^2) for `qasm_simulator`.

        Args:
            quantum_instance (QuantumInstance): quantum backend with all settings
            feature_map (FeatureMap): a feature map that maps data to feature space
            x1_vec (numpy.ndarray): data points, 2-D array, N1xD, where N1 is the number of data,
                                    D is the feature dimension
            x2_vec (numpy.ndarray): data points, 2-D array, N2xD, where N2 is the number of data,
                                    D is the feature dimension
        Returns:
            numpy.ndarray: 2-D matrix, N1xN2
        """

        use_parameterized_circuits = feature_map.support_parameterized_circuit

        if x2_vec is None:
            is_symmetric = True
            x2_vec = x1_vec
        else:
            is_symmetric = False

        is_statevector_sim = quantum_instance.is_statevector

        measurement = not is_statevector_sim
        measurement_basis = '0' * feature_map.num_qubits
        mat = np.ones((x1_vec.shape[0], x2_vec.shape[0]))

        # get all indices
        if is_symmetric:
            mus, nus = np.triu_indices(x1_vec.shape[0],
                                       k=1)  # remove diagonal term
        else:
            mus, nus = np.indices((x1_vec.shape[0], x2_vec.shape[0]))
            mus = np.asarray(mus.flat)
            nus = np.asarray(nus.flat)

        if is_statevector_sim:
            if is_symmetric:
                to_be_computed_data = x1_vec
            else:
                to_be_computed_data = np.concatenate((x1_vec, x2_vec))

            if use_parameterized_circuits:
                # build parameterized circuits, it could be slower for building circuit
                # but overall it should be faster since it only transpile one circuit
                feature_map_params = ParameterVector(
                    'x', feature_map.feature_dimension)
                parameterized_circuit = QSVM._construct_circuit(
                    (feature_map_params, feature_map_params),
                    feature_map,
                    measurement,
                    is_statevector_sim=is_statevector_sim)
                parameterized_circuit = quantum_instance.transpile(
                    parameterized_circuit)[0]
                circuits = [
                    parameterized_circuit.bind_parameters(
                        {feature_map_params: x}) for x in to_be_computed_data
                ]
            else:
                #  the second x is redundant
                to_be_computed_data_pair = [(x, x)
                                            for x in to_be_computed_data]
                if logger.isEnabledFor(logging.DEBUG):
                    logger.debug("Building circuits:")
                    TextProgressBar(sys.stderr)
                circuits = parallel_map(
                    QSVM._construct_circuit,
                    to_be_computed_data_pair,
                    task_args=(feature_map, measurement, is_statevector_sim),
                    num_processes=aqua_globals.num_processes)

            results = quantum_instance.execute(
                circuits, had_transpiled=use_parameterized_circuits)

            if logger.isEnabledFor(logging.DEBUG):
                logger.debug("Calculating overlap:")
                TextProgressBar(sys.stderr)

            offset = 0 if is_symmetric else len(x1_vec)
            matrix_elements = parallel_map(
                QSVM._compute_overlap,
                list(zip(mus, nus + offset)),
                task_args=(results, is_statevector_sim, measurement_basis),
                num_processes=aqua_globals.num_processes)

            for i, j, value in zip(mus, nus, matrix_elements):
                mat[i, j] = value
                if is_symmetric:
                    mat[j, i] = mat[i, j]
        else:
            for idx in range(0, len(mus), QSVM.BATCH_SIZE):
                to_be_computed_data_pair = []
                to_be_computed_index = []
                for sub_idx in range(idx, min(idx + QSVM.BATCH_SIZE,
                                              len(mus))):
                    i = mus[sub_idx]
                    j = nus[sub_idx]
                    x1 = x1_vec[i]
                    x2 = x2_vec[j]
                    if not np.all(x1 == x2):
                        to_be_computed_data_pair.append((x1, x2))
                        to_be_computed_index.append((i, j))

                if use_parameterized_circuits:
                    # build parameterized circuits, it could be slower for building circuit
                    # but overall it should be faster since it only transpile one circuit
                    feature_map_params_x = ParameterVector(
                        'x', feature_map.feature_dimension)
                    feature_map_params_y = ParameterVector(
                        'y', feature_map.feature_dimension)
                    parameterized_circuit = QSVM._construct_circuit(
                        (feature_map_params_x, feature_map_params_y),
                        feature_map,
                        measurement,
                        is_statevector_sim=is_statevector_sim)
                    parameterized_circuit = quantum_instance.transpile(
                        parameterized_circuit)[0]
                    circuits = [
                        parameterized_circuit.bind_parameters({
                            feature_map_params_x:
                            x,
                            feature_map_params_y:
                            y
                        }) for x, y in to_be_computed_data_pair
                    ]
                else:
                    if logger.isEnabledFor(logging.DEBUG):
                        logger.debug("Building circuits:")
                        TextProgressBar(sys.stderr)
                    circuits = parallel_map(
                        QSVM._construct_circuit,
                        to_be_computed_data_pair,
                        task_args=(feature_map, measurement),
                        num_processes=aqua_globals.num_processes)

                results = quantum_instance.execute(
                    circuits, had_transpiled=use_parameterized_circuits)

                if logger.isEnabledFor(logging.DEBUG):
                    logger.debug("Calculating overlap:")
                    TextProgressBar(sys.stderr)
                matrix_elements = parallel_map(
                    QSVM._compute_overlap,
                    range(len(circuits)),
                    task_args=(results, is_statevector_sim, measurement_basis),
                    num_processes=aqua_globals.num_processes)

                for (i, j), value in zip(to_be_computed_index,
                                         matrix_elements):
                    mat[i, j] = value
                    if is_symmetric:
                        mat[j, i] = mat[i, j]

        return mat
Example #48
def calcflux(data, axc, ayc, radmax, radstep):
    """Calculate the total flux between radmax-radstep and radmax+radstep"""
    y, x = np.indices(data.shape)
    r = ((x - axc)**2 + (y - ayc)**2)**0.5
    mask = (r > radmax - radstep) & (r < radmax + radstep)
    return data[mask].sum()
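# --- Added usage example (hypothetical values, not from the original
# source): flux in a thin annulus of radius 20 +/- 2 pixels around the
# centre of a flat image.
import numpy as np

img = np.ones((100, 100))
print(calcflux(img, 50.0, 50.0, 20.0, 2.0))   # ~ the annulus pixel count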
Example #49
def gisaxs(incident_beam,
           reflected_beam,
           pixel_size,
           detector_size,
           dist_sample,
           wavelength,
           theta_i=0.0):
    """
    This function will provide scattering wave vector(q) components(x, y, z),
    q parallel and incident and reflected angles for grazing-incidence small
    angle X-ray scattering (GISAXS) geometry.

    Parameters
    ----------
    incident_beam : tuple
        x and y co-ordinates of the incident beam in pixels
    reflected_beam : tuple
        x and y co-ordinates of the reflected beam in pixels
    pixel_size : tuple
        pixel_size in um
    detector_size: tuple
        2 element tuple defining no. of pixels(size) in the
        detector X and Y direction
    dist_sample : float
       sample to detector distance, in meters
    wavelength : float
        wavelength of the x-ray beam in Angstroms
    theta_i : float, optional
        out of plane angle, default 0.0

    Returns
    -------
    namedtuple
        `gisaxs_output` object is returned
        This `gisaxs_output` object contains, in this order:

        - alpha_i : float
          incident angle
        - theta_f : array
          out of plane angle
          shape (detector_size[0], detector_size[1])
        - alpha_f : array
          exit angle
          shape (detector_size[0], detector_size[1])
        - tilt_angle : float
          tilt angle
        - qx : array
          x component of the scattering wave vector
          shape (detector_size[0], detector_size[1])
        - qy : array
          y component of the scattering wave vector
          shape (detector_size[0], detector_size[1])
        - qz : array
          z component of the scattering wave vector
          shape (detector_size[0], detector_size[1])
        - qr : array
          q parallel component
          shape (detector_size[0], detector_size[1])

    Notes
    -----
    This implementation is based on published work. [1]_

    References
    ----------
    .. [1] R. Lazzari, "IsGISAXS: a program for grazing-incidence small-
       angle X-ray scattering analysis of supported islands," J. Appl.
       Cryst., vol 35, p 406-421, 2002.
    """
    inc_x, inc_y = incident_beam
    refl_x, refl_y = reflected_beam

    # convert pixel_size to meters
    pixel_size = np.asarray(pixel_size) * 10**(-6)

    # tilt angle
    tilt_angle = np.arctan2((refl_x - inc_x) * pixel_size[0],
                            (refl_y - inc_y) * pixel_size[1])
    # incident angle
    alpha_i = np.arctan2((refl_y - inc_y) * pixel_size[1], dist_sample) / 2.

    y, x = np.indices(detector_size)
    # exit angle
    alpha_f = np.arctan2((y - inc_y) * pixel_size[1], dist_sample) - alpha_i
    # out of plane angle
    two_theta = np.arctan2((x - inc_x) * pixel_size[0], dist_sample)
    theta_f = two_theta / 2 - theta_i
    # wave number
    wave_number = 2 * np.pi / wavelength

    # x component
    qx = (np.cos(alpha_f) * np.cos(2 * theta_f) -
          np.cos(alpha_i) * np.cos(2 * theta_i)) * wave_number

    # y component
    # the variables post-fixed with an underscore are intermediate steps
    qy_ = (np.cos(alpha_f) * np.sin(2 * theta_f) -
           np.cos(alpha_i) * np.sin(2 * theta_i))
    qz_ = np.sin(alpha_f) + np.sin(alpha_i)
    qy = (qz_ * np.sin(tilt_angle) + qy_ * np.cos(tilt_angle)) * wave_number

    # z component
    qz = (qz_ * np.cos(tilt_angle) - qy_ * np.sin(tilt_angle)) * wave_number

    # q parallel
    qr = np.sqrt(qx**2 + qy**2)

    return gisaxs_output(alpha_i, theta_f, alpha_f, tilt_angle, qx, qy, qz, qr)
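# --- Added usage example (hypothetical values, not from the original
# source; assumes the module defines the `gisaxs_output` namedtuple with the
# field names listed in the docstring): a 256x256 detector with 55 um
# pixels, 1 m sample distance, and a 1.54 A beam.
out = gisaxs((128, 110), (128, 130), (55, 55), (256, 256), 1.0, 1.54)
print(np.degrees(out.alpha_i), out.qz.shape)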
Example #50
import numpy as np


def saveAsPrg(fn, address, data):
    fo = open(fn, "wb")
    header = np.array((address & 255, address // 256), np.uint8).tobytes()
    fo.write(header)
    fo.write(data)
    fo.close()


y, x = np.indices((200, 320))
yy = (y * 2 + 1 - 100)**2
xx = (x * 2 + 1 - 160)**2
r = (xx + yy)**.5
bits = (r / 6).astype(int) % 2

bits = bits.reshape((25, 8, 40, 8))
bits = bits.transpose((0, 2, 1, 3))
bits = np.sum(bits * [128, 64, 32, 16, 8, 4, 2, 1], axis=3)
if 1:
    bits[-1, -1, :] = [
        0xe8,
        0xd4,
        0xb2,
        0x71,
        0x8e,
        0x4d,
        0x2b,
        0x17,
    ]
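# --- Added usage example (hypothetical file name, not from the original
# source): pack the 320x200 bitmap computed above and write it as a C64 PRG
# loading at $2000.
saveAsPrg("circles.prg", 0x2000, bits.astype(np.uint8).tobytes())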
Example #51
ulcrnrlon, ulcrnrlat = awips221(ulcrnrx, ulcrnry, inverse=True)
print('4 crnrs of AWIPS grid 221:')
print(llcrnrlon, llcrnrlat)
print(lrcrnrlon, lrcrnrlat)
print(urcrnrlon, urcrnrlat)
print(ulcrnrlon, ulcrnrlat)
print('from GRIB docs')
print('(see http://www.nco.ncep.noaa.gov/pmb/docs/on388/tableb.html)')
print('   -145.5  1.0')
print('   -68.318 0.897')
print('   -2.566 46.352')
print('   148.639 46.635')
# compute lons and lats for the whole AWIPS grid 221 (377x249).
dx = (urcrnrx - llcrnrx) / (nx - 1)
dy = (urcrnry - llcrnry) / (ny - 1)
x = llcrnrx + dx * N.indices((ny, nx), 'f')[1, :, :]
y = llcrnry + dy * N.indices((ny, nx), 'f')[0, :, :]
t1 = time.perf_counter()
lons, lats = awips221(N.ravel(x).tolist(), N.ravel(y).tolist(), inverse=True)
t2 = time.perf_counter()
print('data in lists:')
print('compute lats/lons for all points on AWIPS 221 grid (%sx%s)' % (nx, ny))
print('max/min lons')
print(min(lons), max(lons))
print('max/min lats')
print(min(lats), max(lats))
print('took', t2 - t1, 'secs')
xa = array.array('f', N.ravel(x).tolist())
ya = array.array('f', N.ravel(y).tolist())
t1 = time.perf_counter()
lons, lats = awips221(xa, ya, inverse=True)
Example #52
def make_model_sources_image(shape, model, source_table, oversample=1):
    """
    Make an image containing sources generated from a user-specified
    model.

    Parameters
    ----------
    shape : 2-tuple of int
        The shape of the output 2D image.

    model : 2D astropy.modeling.models object
        The model to be used for rendering the sources.

    source_table : `~astropy.table.Table`
        Table of parameters for the sources.  Each row of the table
        corresponds to a source whose model parameters are defined by
        the column names, which must match the model parameter names.
        Column names that do not match model parameters will be ignored.
        Model parameters not defined in the table will be set to the
        ``model`` default value.

    oversample : float, optional
        The sampling factor used to discretize the models on a pixel
        grid.  If the value is 1.0 (the default), then the models will
        be discretized by taking the value at the center of the pixel
        bin.  Note that this method will not preserve the total flux of
        very small sources.  Otherwise, the models will be discretized
        by taking the average over an oversampled grid.  The pixels will
        be oversampled by the ``oversample`` factor.

    Returns
    -------
    image : 2D `~numpy.ndarray`
        Image containing model sources.

    See Also
    --------
    make_random_models_table, make_gaussian_sources_image

    Examples
    --------
    .. plot::
        :include-source:

        from collections import OrderedDict
        from astropy.modeling.models import Moffat2D
        from photutils.datasets import (make_random_models_table,
                                        make_model_sources_image)

        model = Moffat2D()
        n_sources = 10
        shape = (100, 100)
        param_ranges = [('amplitude', [100, 200]),
                        ('x_0', [0, shape[1]]),
                        ('y_0', [0, shape[0]]),
                        ('gamma', [5, 10]),
                        ('alpha', [1, 2])]
        param_ranges = OrderedDict(param_ranges)
        sources = make_random_models_table(n_sources, param_ranges,
                                           random_state=12345)

        data = make_model_sources_image(shape, model, sources)
        plt.imshow(data)
    """

    image = np.zeros(shape, dtype=np.float64)
    y, x = np.indices(shape)

    params_to_set = []
    for param in source_table.colnames:
        if param in model.param_names:
            params_to_set.append(param)

    # Save the initial parameter values so we can set them back when
    # done with the loop.  It's best not to copy a model, because some
    # models (e.g. PSF models) may have substantial amounts of data in
    # them.
    init_params = {param: getattr(model, param) for param in params_to_set}

    try:
        for i, source in enumerate(source_table):
            for param in params_to_set:
                setattr(model, param, source[param])

            if oversample == 1:
                image += model(x, y)
            else:
                image += discretize_model(model, (0, shape[1]),
                                          (0, shape[0]), mode='oversample',
                                          factor=oversample)
    finally:
        for param, value in init_params.items():
            setattr(model, param, value)

    return image
Example #53
    def unravel_map(d,
                    y,
                    x,
                    raveled_signal,
                    raveled_bsig,
                    sn,
                    secsn,
                    size=156):
        from skimage.measure import label
        tile = d.split('/')[4][1:]
        _id = os.path.basename(d).split('-')[1].split('_')[0]

        def _ellipse(masked, center, a, b, phi):
            yi, xi = np.indices(masked.shape)
            yi = yi.ravel()[~np.isnan(masked).ravel()]
            xi = xi.ravel()[~np.isnan(masked).ravel()]

            xc = center[0]
            yc = center[1]

            ell = ((xi - xc) * np.cos(phi) +
                   (yi - yc) * np.sin(phi))**2. / a**2 + (
                       (xi - xc) * np.sin(phi) -
                       (yi - yc) * np.cos(phi))**2. / b**2

            tmpidx = np.where(ell < 1)[0]
            return len(tmpidx)

        minsn = 0.5
        unraveled_maps = np.zeros((2, size, size)) * np.nan
        unique_map = np.zeros((size, size))
        coords = zip(y.astype(int), x.astype(int))
        tmpdata = [raveled_signal[sn > minsn], raveled_bsig[secsn > minsn]]
        tmpsn = [sn, secsn]

        # a, b, phi = _get_ellipse_params('../run/images/{}/watershed_segmaps/_id-{}.fits'.format(tile, _id))
        a, b, phi = _get_ellipse_params(
            './{}/{}/watershed_segmaps/_id-{}.fits'.format(path, tile, _id))

        for i, raveled_data in enumerate(tmpdata):
            for j, (yi, xi) in enumerate(
                    zip(y[tmpsn[i] > minsn], x[tmpsn[i] > minsn])):
                unraveled_maps[i, int(yi), int(xi)] = raveled_data[j]
                unique_map[int(yi), int(xi)] = 1.

        labels = label(unique_map, neighbors=4)
        seg = labels == np.argmax(np.bincount(labels.flat)[1:]) + 1
        seg = seg.astype(float)
        seg = medfilt(seg)

        seg[seg > 0] = 1.
        seg = ndi.binary_fill_holes(seg).astype(float)
        seg[seg == 0] = np.nan

        # oldcounts = 0
        # newa = a/4.
        # e = b/a
        # while True:
        #     counts = _ellipse(seg, [77,77], newa, e*newa, phi)
        #     if counts>oldcounts:
        #         newa += 1.
        #         oldcounts = counts
        #     else:
        #         break

        plt.subplot(1, 1, 1)
        plt.imshow(seg, origin='lower')
        plt.show()
        newy, newx = np.indices(seg.shape)
        newy = newy.ravel()[~np.isnan(seg.ravel())]
        newx = newx.ravel()[~np.isnan(seg.ravel())]
        indices = [
            i for i, coord in enumerate(zip(y, x)) if coord in zip(newy, newx)
        ]
        outidx = [
            i for i, coord in enumerate(zip(y, x))
            if coord not in zip(newy, newx)
        ]

        # plt.close()
        return indices, outidx
Example #54
def image_moment(image, x_order, y_order):
    y, x = np.indices(image.shape)
    return np.sum(x**x_order * y**y_order * image)
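# --- Added usage example (not from the original source): raw image moments
# give the total intensity and the intensity-weighted centroid.
import numpy as np

img = np.zeros((5, 5))
img[2, 3] = 1.0
m00 = image_moment(img, 0, 0)           # total intensity -> 1.0
print(image_moment(img, 1, 0) / m00,    # centroid x -> 3.0
      image_moment(img, 0, 1) / m00)    # centroid y -> 2.0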
Example #55
def congrid(a, newdims, method='linear', centre=False, minusone=False):
    '''Arbitrary resampling of source array to new dimension sizes.
    Currently only supports maintaining the same number of dimensions.
    To use 1-D arrays, first promote them to shape (x,1).
    
    Uses the same parameters and creates the same co-ordinate lookup points
    as IDL's congrid routine, which apparently originally came from a VAX/VMS
    routine of the same name.

    method:
    neighbour - closest value from original data
    nearest and linear - uses n x 1-D interpolations using
                         scipy.interpolate.interp1d
    (see Numerical Recipes for validity of use of n 1-D interpolations)
    spline - uses ndimage.map_coordinates

    centre:
    True - interpolation points are at the centres of the bins
    False - points are at the front edge of the bin

    minusone:
    For example- inarray.shape = (i,j) & new dimensions = (x,y)
    False - inarray is resampled by factors of (i/x) * (j/y)
    True - inarray is resampled by (i-1)/(x-1) * (j-1)/(y-1)
    This prevents extrapolation one element beyond bounds of input array.
    '''
    if a.dtype not in (n.float64, n.float32):
        a = a.astype(float)

    m1 = int(minusone)
    ofs = int(centre) * 0.5
    old = n.array(a.shape)
    ndims = len(a.shape)
    if len(newdims) != ndims:
        print("[congrid] dimensions error. " \
              "This routine currently only support " \
              "rebinning to the same number of dimensions.")
        return None
    newdims = n.asarray(newdims, dtype=float)
    dimlist = []

    if method == 'neighbour':
        for i in range(ndims):
            base = n.indices(newdims.astype(int))[i]
            dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
                            * (base + ofs) - ofs )
        cd = n.array(dimlist).round().astype(int)
        newa = a[tuple(cd)]
        return newa

    elif method in ['nearest', 'linear']:
        # calculate new dims
        for i in range(ndims):
            base = n.arange(newdims[i])
            dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
                            * (base + ofs) - ofs )
        # specify old dims
        olddims = [n.arange(i, dtype=float) for i in list(a.shape)]

        # first interpolation - for ndims = any
        mint = scipy.interpolate.interp1d(olddims[-1], a, kind=method)
        newa = mint(dimlist[-1])

        trorder = [ndims - 1] + list(range(ndims - 1))
        for i in range(ndims - 2, -1, -1):
            newa = newa.transpose(trorder)

            mint = scipy.interpolate.interp1d(olddims[i], newa, kind=method)
            newa = mint(dimlist[i])

        if ndims > 1:
            # need one more transpose to return to original dimensions
            newa = newa.transpose(trorder)

        return newa
    elif method in ['spline']:
        oslices = [slice(0, j) for j in old]
        oldcoords = n.ogrid[oslices]
        nslices = [slice(0, j) for j in list(newdims)]
        newcoords = n.mgrid[nslices]

        newcoords_dims = list(range(n.ndim(newcoords)))
        #make first index last
        newcoords_dims.append(newcoords_dims.pop(0))
        newcoords_tr = newcoords.transpose(newcoords_dims)
        # makes a view that affects newcoords

        newcoords_tr += ofs

        deltas = (n.asarray(old) - m1) / (newdims - m1)
        newcoords_tr *= deltas

        newcoords_tr -= ofs

        newa = scipy.ndimage.map_coordinates(a, newcoords)
        return newa
    else:
        print("Congrid error: Unrecognized interpolation type.\n", \
              "Currently only \'neighbour\', \'nearest\',\'linear\',", \
              "and \'spline\' are supported.")
        return None
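A minimal usage sketch, assuming numpy is imported as n to match the alias above; minusone=True keeps the lookup points inside the input bounds, so interp1d never has to extrapolate.

src = n.arange(12, dtype=float).reshape(3, 4)
out = congrid(src, (6, 8), method='linear', minusone=True)
print(out.shape)  # (6, 8)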
Example #56
    def __init__(self,
                 hexagonal=False,
                 L=200,
                 v_para=0.6,
                 v_tran_1=0.6,
                 v_tran_2=0.6,
                 d=0.05,
                 e=0.05,
                 rp=50,
                 tot_time=10**6,
                 pace_rate=220,
                 seed1=10,
                 seed2=20,
                 seed3=30,
                 seed4=40):
        self.hexagonal = hexagonal
        self.size = L
        self.first_col = np.arange(0, self.size * self.size, self.size)
        if self.hexagonal == False:
            self.transverse_prob = v_tran_1
        if self.hexagonal == True:
            self.transverse_prob_l = v_tran_1
            self.transverse_prob_r = v_tran_2
        self.parallel_prob = v_para
        self.dysfunctional_prob = d
        self.nonfire_prob = e
        self.rp = rp
        self.t = 0
        self.tot_time = tot_time
        self.tot_AF = 0  # overall
        self.t_AF = 0  # in this episode
        self.t_SR = 0  # in this period of SR
        self.pace_rate = pace_rate  # number of timesteps between sinus beats
        self.pace = np.arange(0, self.tot_time,
                              self.pace_rate)  # timesteps of SR beat
        self.seed_dysfunc = seed1
        self.seed_connect_tran = seed2
        self.seed_connect_para = seed3
        self.seed_prop = seed4
        self.index = np.arange(0, L * L)  # cell positions in each array
        self.position = self.index.reshape(self.size, self.size)
        self.y = np.indices(
            (self.size, self.size))[0]  # y coordinate for cells
        self.x = np.indices(
            (self.size, self.size))[1]  # x coordinate for cells

        if self.hexagonal == False:
            self.neighbours = np.full(((L * L) * 4),
                                      fill_value=None,
                                      dtype=float)
            self.start_n_down = self.size * self.size
            self.start_n_left = self.size * self.size * 2
            self.start_n_right = self.size * self.size * 3

        if self.hexagonal == True:
            self.n_up = np.full((self.size * self.size),
                                fill_value=None,
                                dtype=float)  # neighbours above
            self.n_down_right = np.full((self.size * self.size),
                                        fill_value=None,
                                        dtype=float)  # neighbours below-right
            self.n_up_right = np.full((self.size * self.size),
                                      fill_value=None,
                                      dtype=float)  # neighbours above-right
            self.n_down_left = np.full((self.size * self.size),
                                       fill_value=None,
                                       dtype=float)  # neighbours below-left
            self.n_up_left = np.full((self.size * self.size),
                                     fill_value=None,
                                     dtype=float)  # neighbours above-left
            self.n_left = np.full((self.size * self.size),
                                  fill_value=None,
                                  dtype=float)  # neighbours to left
            self.n_right = np.full((self.size * self.size),
                                   fill_value=None,
                                   dtype=float)  # neighbours to right

        self.phases = np.full(
            (L * L),
            fill_value=self.rp)  # state cell is in (0 = excited, rp = resting)
        self.time_for_ECG = np.arange(-500, 1)  # time for ECG plot
        self.potentials = np.zeros(len(self.time_for_ECG))  # ECG plot values
        self.V = np.full(
            (L * L), fill_value=-90.0
        )  # voltage depending on state of cell given in Supplementary Material
        self.dysfunctional_cells = np.full([L * L],
                                           fill_value=False,
                                           dtype=bool)
        self.states = [
            [] for _ in range(self.rp)
        ]  # one list of cells per non-resting state (distinct lists, not aliases)
        self.resting = np.full([L * L], fill_value=True,
                               dtype=bool)  # can they be excited
        self.tbe = np.full([L * L], fill_value=False,
                           dtype=bool)  # cells to be excited in next timestep

        # setting connections and dysfunctional cells
        if self.hexagonal == False:
            np.random.seed(self.seed_dysfunc)
            w = np.random.rand(L * L)
            np.random.seed(self.seed_connect_tran)
            z = np.random.rand(L * L)
            np.random.seed(self.seed_connect_para)
            y = np.random.rand(L * L)

            for j in self.index:

                if self.dysfunctional_prob > z[j]:  # dysfunctional
                    self.dysfunctional_cells[j] = False

                if self.dysfunctional_prob <= z[j]:  # functional
                    self.dysfunctional_cells[j] = True

            for j in self.index:

                if y[j] <= self.parallel_prob:

                    if j in np.arange(0, self.size * self.size, self.size):

                        self.neighbours[j +
                                        (self.size * self.size * 3)] = int(j +
                                                                           1)
                        self.neighbours[j + 1 +
                                        (self.size * self.size * 2)] = int(j)

                    elif j in (np.arange(0, self.size * self.size, self.size) +
                               L - 1):

                        self.neighbours[j + (self.size * self.size * 3)] = None

                    else:

                        self.neighbours[j +
                                        (self.size * self.size * 3)] = int(j +
                                                                           1)
                        self.neighbours[j + 1 +
                                        (self.size * self.size * 2)] = int(j)

            for j in self.index:
                if w[j] <= self.transverse_prob:
                    if j in np.arange(self.size * self.size - self.size,
                                      self.size * self.size):
                        self.neighbours[j + (self.size * self.size)] = j - (
                            self.size * self.size - self.size)
                        self.neighbours[j - (self.size * self.size -
                                             self.size)] = j
                    else:
                        self.neighbours[j + (self.size *
                                             self.size)] = j + self.size
                        self.neighbours[j + self.size] = j
        if self.hexagonal == True:
            np.random.seed(self.seed_dysfunc)
            w = np.random.rand(L * L)
            np.random.seed(self.seed_connect_tran)
            z = np.random.rand(L * L)
            u = np.random.rand(self.size * self.size)
            np.random.seed(self.seed_connect_para)
            y = np.random.rand(L * L)

            for j in self.index:
                if d > z[j]:  # dysfunctional
                    self.dysfunctional_cells[j] = False
                if d <= z[j]:  # functional
                    self.dysfunctional_cells[j] = True
                if y[j] <= self.parallel_prob:
                    if j in np.arange(0, self.size * self.size, self.size):
                        self.n_right[j] = j + 1
                    elif j in (np.arange(0, self.size * self.size, self.size) +
                               self.size - 1):
                        self.n_left[j] = j - 1
                    else:
                        self.n_left[j] = j - 1
                        self.n_right[j] = j + 1
            for j in self.index:
                #even
                if j in self.position[np.arange(0, self.size, 2)]:
                    if w[j] <= self.transverse_prob_l:
                        if j not in self.first_col:
                            self.n_down_left[j] = j + L - 1
                            self.n_up_right[j + L - 1] = j
                    if u[j] <= self.transverse_prob_r:
                        self.n_down_right[j] = j + L
                        self.n_up_left[j + L] = j
                #odd
                #if w[j] <= v:
                if j in self.position[np.arange(1, self.size, 2)]:
                    if j in np.arange(L * L - L, L * L):
                        if w[j] <= self.transverse_prob_l:
                            self.n_down_left[j] = j - (L * L - L)
                            self.n_up_right[j - (L * L - L)] = j
                    # if y[j] <= self.transverse_prob_r:
                    #    self.n_down_right[j] = j-(L*L-L)+1
                    #   self.n_up_left[j-(L*L-L)+1] = j

                    else:
                        if w[j] <= self.transverse_prob_l:
                            self.n_down_left[j] = j + L
                            self.n_up_right[j + L] = j
                        if u[j] <= self.transverse_prob_r:
                            if j not in (np.arange(0, self.size * self.size,
                                                   self.size) + self.size - 1):
                                self.n_down_right[j] = j + L + 1
                                self.n_up_left[j + L + 1] = j
        # functional and dysfunctional cells for first column (speeds up Sinus Rhythm)
        self.first_dys = np.array(
            self.first_col[~self.dysfunctional_cells[self.first_col]])
        self.first_fun = np.array(
            self.first_col[self.dysfunctional_cells[self.first_col]])
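As an aside, the list-aliasing pitfall avoided in self.states above is worth a short demonstration: [[]] * n builds n references to a single list, while a comprehension builds n distinct lists.

shared = [[]] * 3
shared[0].append(1)
print(shared)    # [[1], [1], [1]] -- three references to one list
distinct = [[] for _ in range(3)]
distinct[0].append(1)
print(distinct)  # [[1], [], []]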
Example #57
    def test_simple(self):
        fig, ax = plt.subplots(subplot_kw={"projection": "3d"})

        x, y, z = np.indices((5, 4, 3))
        voxels = (x == y) | (y == z)
        ax.voxels(voxels)
Example #58
def util_sample_from_img(img):
    # All candidate pixel positions, reshaped to (2, H*W).
    pos = np.indices(dimensions=img.shape)
    pos = pos.reshape(2, pos.shape[1] * pos.shape[2])
    # Normalise intensities into a probability vector (img is assumed non-negative).
    img_flat = np.clip(img.flatten() / img.flatten().sum(), 0.0, 1.0)
    # Draw one (row, col) index pair weighted by pixel intensity.
    return pos[:, np.random.choice(np.arange(pos.shape[1]), 1, p=img_flat)]
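A minimal usage sketch (the test image is illustrative); img must be non-negative so the normalised intensities form a valid probability vector for np.random.choice.

import numpy as np

img = np.zeros((32, 32))
img[8:24, 8:24] = 1.0              # uniform square of probability mass
sample = util_sample_from_img(img)
print(sample.shape)                # (2, 1): one (row, col) position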
Example #59
    def get_resources(self, data_dictionary, dataset):
        """Create resources for computing a variable. """
        resources = Resources()
        for key in data_dictionary.keys():
            if key in self.datasets:
                data = data_dictionary[key]
                if self.id_names[key] not in data_dictionary[key].keys(
                ) and not isinstance(self.id_names[key], list):

                    data[self.id_names[key]] = arange(1,\
                        len(data_dictionary[key][data_dictionary[key].keys()[0]])+1) # add id array

                if key == "land_cover":
                    land_cover_storage = StorageFactory().get_storage(
                        'dict_storage')
                    land_cover_table_name = 'land_cover'
                    land_cover_storage.write_table(
                        table_name=land_cover_table_name,
                        table_data=data,
                    )

                    lc = LandCoverDataset(
                        in_storage=land_cover_storage,
                        in_table_name=land_cover_table_name,
                    )

                    # add relative_x and relative_y
                    lc.get_id_attribute()
                    n = int(ceil(sqrt(lc.size())))

                    if "relative_x" not in data.keys():
                        x = (indices((n, n)) + 1)[1].ravel()
                        lc.add_attribute(x[0:lc.size()],
                                         "relative_x",
                                         metadata=1)
                    if "relative_y" not in data.keys():
                        y = (indices((n, n)) + 1)[0].ravel()
                        lc.add_attribute(y[0:lc.size()],
                                         "relative_y",
                                         metadata=1)

                    resources.merge({key: lc})

                if key == "gridcell":
                    gridcell_storage = StorageFactory().get_storage(
                        'dict_storage')
                    gridcell_table_name = 'gridcell'
                    gridcell_storage.write_table(
                        table_name=gridcell_table_name,
                        table_data=data,
                    )

                    gridcell_dataset = GridcellDataset(
                        in_storage=gridcell_storage,
                        in_table_name=gridcell_table_name,
                    )

                    resources.merge({key: gridcell_dataset})
            else:
                resources.merge({key: data_dictionary[key]})

        if dataset in self.interactions:
            pass
        else:
            resources.merge({"dataset": resources[dataset]})
        resources.merge({"check_variables": '*', "debug": 4})
        return resources
Example #60
bmap.drawparallels(np.arange(-90,90,30))
# lat/lon coordinates of five cities.
lats=[40.02,32.73,38.55,48.25,17.29]
lons=[-105.16,-117.16,-77.00,-114.21,-88.10]
cities=['Boulder, CO','San Diego, CA',
        'Washington, DC','Whitefish, MT','Belize City, Belize']
# compute the native map projection coordinates for cities.
xc,yc = bmap(lons,lats)
# plot filled circles at the locations of the cities.
bmap.plot(xc,yc,'bo')
# plot the names of those five cities.
for name,xpt,ypt in zip(cities,xc,yc):
    plt.text(xpt+50000,ypt+50000,name,fontsize=9)
# make up some data on a regular lat/lon grid.
nlats = 73; nlons = 145; delta = 2.*np.pi/(nlons-1)
lats = (0.5*np.pi-delta*np.indices((nlats,nlons))[0,:,:])
lons = (delta*np.indices((nlats,nlons))[1,:,:])
wave = 0.75*(np.sin(2.*lats)**8*np.cos(4.*lons))
mean = 0.5*np.cos(2.*lats)*((np.sin(2.*lats))**2 + 2.)
# compute native map projection coordinates of lat/lon grid.
x, y = bmap(lons*180./np.pi, lats*180./np.pi)
# contour data over the map.
cs = bmap.contour(x,y,wave+mean,15,linewidths=1.5)
plt.title('filled continent background')

# as above, but use land-sea mask image as map background.
fig = plt.figure()
bmap.drawmapboundary()
bmap.drawmeridians(np.arange(0,360,30))
bmap.drawparallels(np.arange(-90,90,30))
# plot filled circles at the locations of the cities.