def _process(self, p, phase_tuning):
    """
    Compute a per-unit modulation ratio map from a phase tuning stack.

    For every sheet coordinate the phase-tuning curve is tiled
    ``p.fft_sampling`` times and Fourier transformed; the modulation
    ratio is 2*|F1|/|F0| (first harmonic over DC).  Results are
    written into an Image per key and returned inside a Layout under
    the source image's label.
    """
    phase_image = phase_tuning.last
    # NOTE(review): numpy arrays are conventionally (rows, cols); this
    # unpacks as (cols, rows) — presumably matching the sheet's
    # sampling convention. TODO confirm against sample()/zeros usage.
    cols, rows = phase_image.data.shape
    bounds = phase_image.bounds
    label = phase_image.label
    # Use the user-supplied region of interest, else the full bounds.
    roi = p.roi if p.roi is not None else bounds.lbrt()
    # Disable sorting while building the sampled DataFrame wrapper.
    with sorted_context(False):
        phase_tuning_df = DFrame(phase_tuning.sample((cols, rows),
                                                     bounds=roi).dframe())
    # One phase->response curve per (Time, x, y), laid out on an (x, y) grid.
    grid = phase_tuning_df.curve('Phase', 'Response',
                                 ['Time', 'x', 'y']).grid(['x', 'y'])
    results = Layout()
    sheet_stack = HoloMap(None,
                          key_dimensions=grid.values()[0].key_dimensions)
    for idx, ((x, y), curve_stack) in enumerate(grid.items()):
        for key, curve in curve_stack.data.items():
            if key not in sheet_stack:
                # Lazily create an empty modulation-ratio image per key.
                complexity = np.zeros((int(rows), int(cols)))
                sheet_stack[key] = Image(
                    complexity, bounds, label=label,
                    group='Modulation Ratio',
                    value_dimensions=[Dimension('Modulation Ratio',
                                                range=(0, 2))])
            # Map sheet coordinates to matrix indices for this unit.
            row, col = phase_image.sheet2matrixidx(x, y)
            ydata = curve.dimension_values(1)
            # Tile the response curve to sharpen the FFT frequency bins;
            # bin p.fft_sampling then corresponds to the first harmonic.
            fft = np.fft.fft(list(ydata) * p.fft_sampling)
            dc_value = abs(fft[0])
            # Guard against division by zero for silent units.
            if dc_value != 0:
                modulation_ratio = 2 * abs(fft[p.fft_sampling]) / dc_value
                sheet_stack.data[key].data[row, col] = modulation_ratio
    results.set_path((label,), sheet_stack)
    return results
def groupby(cls, holocube, dims, container_type=HoloMap, group_type=None,
            **kwargs):
    """
    Partition the HoloCube along one or more key dimensions.

    Returns a ``container_type`` indexed by the grouped dimensions,
    where each entry holds the slice of the cube for one coordinate
    combination wrapped in ``group_type``.  This makes it very easy
    to break up a high-dimensional HoloCube into smaller viewable
    chunks.
    """
    # Normalise to a list of Dimension objects.
    dims = dims if isinstance(dims, list) else [dims]
    dims = [holocube.get_dimension(d) for d in dims]
    names = [d.name for d in dims]
    # Key dimensions that remain on each extracted slice.
    remaining = [kd for kd in holocube.kdims if kd not in dims]

    def extract(coords):
        # Select the sub-cube matching one coordinate combination.
        selection = iris.Constraint(**dict(zip(names, coords)))
        return holocube.clone(holocube.data.extract(selection),
                              new_type=group_type,
                              **dict(kwargs, kdims=remaining))

    coord_values = [cls.values(holocube, d, expanded=False) for d in dims]
    data = [(coords, extract(coords)) for coords in product(*coord_values)]
    if issubclass(container_type, NdMapping):
        # Skip per-item validation and sorting while bulk-building.
        with item_check(False), sorted_context(False):
            return container_type(data, kdims=dims)
    return container_type(data)
def test_columns_groupby(self):
    # Expected groups after splitting the table on Gender.
    males = {'Age': [10, 16], 'Weight': [15, 18], 'Height': [0.8, 0.6]}
    females = {'Age': [12], 'Weight': [10], 'Height': [0.8]}
    with sorted_context(False):
        expected = HoloMap(
            [('M', Columns(males, kdims=['Age'], vdims=self.vdims)),
             ('F', Columns(females, kdims=['Age'], vdims=self.vdims))],
            kdims=['Gender'])
    self.assertEqual(self.table.groupby(['Gender']), expected)
def test_columns_groupby(self):
    # Per-gender column data expected from grouping on Gender.
    by_gender = [
        ('M', {'Age': [10, 16], 'Weight': [15, 18], 'Height': [0.8, 0.6]}),
        ('F', {'Age': [12], 'Weight': [10], 'Height': [0.8]}),
    ]
    with sorted_context(False):
        entries = []
        for gender, columns in by_gender:
            entries.append(
                (gender, Columns(columns, kdims=['Age'], vdims=self.vdims)))
        expected = HoloMap(entries, kdims=['Gender'])
    self.assertEqual(self.table.groupby(['Gender']), expected)
def groupby(cls, dataset, dims, container_type=HoloMap, group_type=None,
            **kwargs):
    """
    Groups the data by one or more dimensions returning a
    container indexed by the grouped dimensions containing
    slices of the cube wrapped in the group_type. This makes
    it very easy to break up a high-dimensional dataset into
    smaller viewable chunks.
    """
    import iris  # deferred import: iris is an optional backend
    if not isinstance(dims, list):
        dims = [dims]
    dims = [dataset.get_dimension(d, strict=True) for d in dims]
    constraints = [d.name for d in dims]
    # Key dimensions left on each slice after grouping.
    slice_dims = [d for d in dataset.kdims if d not in dims]

    # Update the kwargs appropriately for Element group types
    group_kwargs = {}
    # The string 'raw' requests plain dicts instead of wrapped Elements.
    group_type = dict if group_type == 'raw' else group_type
    if issubclass(group_type, Element):
        group_kwargs.update(util.get_param_values(dataset))
        group_kwargs['kdims'] = slice_dims
    group_kwargs.update(kwargs)

    # If the caller requested fewer kdims than remain on the slice, the
    # iris cube must be flattened to a columnar form before regrouping.
    # NOTE(review): this reads group_kwargs['kdims'], which is only set
    # above for Element group types — presumably non-Element callers
    # always pass kdims via kwargs; verify against call sites.
    drop_dim = any(d not in group_kwargs['kdims'] for d in slice_dims)

    unique_coords = product(
        *[cls.values(dataset, d, expanded=False) for d in dims])
    data = []
    for key in unique_coords:
        # Extract the sub-cube matching this coordinate combination.
        constraint = iris.Constraint(**dict(zip(constraints, key)))
        extracted = dataset.data.extract(constraint)
        if drop_dim:
            extracted = group_type(extracted, kdims=slice_dims,
                                   vdims=dataset.vdims).columns()
        cube = group_type(extracted, **group_kwargs)
        data.append((key, cube))
    if issubclass(container_type, NdMapping):
        # Skip per-item validation and sorting while bulk-building.
        with item_check(False), sorted_context(False):
            return container_type(data, kdims=dims)
    else:
        return container_type(data)
def groupby(cls, holocube, dims, container_type=HoloMap, group_type=None,
            **kwargs):
    """
    Partition the HoloCube along one or more key dimensions.

    Returns a ``container_type`` indexed by the grouped dimensions,
    where each entry holds the slice of the cube for one coordinate
    combination wrapped in ``group_type``.  When ``dynamic=True`` is
    passed, a DynamicMap is returned instead, evaluating each group
    lazily on demand.
    """
    dims = dims if isinstance(dims, list) else [dims]
    dynamic = kwargs.get('dynamic', False)
    dims = [holocube.get_dimension(d) for d in dims]
    names = [d.name for d in dims]
    # Key dimensions that remain on each extracted slice.
    remaining = [kd for kd in holocube.kdims if kd not in dims]

    def select(*key):
        # Extract the sub-cube matching one coordinate combination.
        constraint = iris.Constraint(**dict(zip(names, key)))
        return holocube.clone(holocube.data.extract(constraint),
                              new_type=group_type,
                              **dict(kwargs, kdims=remaining))

    if dynamic:
        # Declare the value range of each dynamic dimension up front so
        # the DynamicMap can present the available coordinates.
        dynamic_dims = [d(values=list(cls.values(holocube, d, False)))
                        for d in dims]
        return DynamicMap(select, kdims=dynamic_dims)

    coord_values = [cls.values(holocube, d, expanded=False) for d in dims]
    data = [(key, select(*key)) for key in product(*coord_values)]
    if issubclass(container_type, NdMapping):
        # Skip per-item validation and sorting while bulk-building.
        with item_check(False), sorted_context(False):
            return container_type(data, kdims=dims)
    return container_type(data)
def groupby(cls, dataset, dims, container_type=HoloMap, group_type=None,
            **kwargs):
    """
    Groups the data by one or more dimensions returning a
    container indexed by the grouped dimensions containing
    slices of the cube wrapped in the group_type. This makes
    it very easy to break up a high-dimensional dataset into
    smaller viewable chunks.
    """
    import iris  # deferred import: iris is an optional backend
    dims = dims if isinstance(dims, list) else [dims]
    dims = [dataset.get_dimension(d, strict=True) for d in dims]
    names = [d.name for d in dims]
    # Key dimensions left on each slice after grouping.
    slice_dims = [d for d in dataset.kdims if d not in dims]

    # Assemble the keyword arguments for the group type; Element
    # groups inherit the dataset's parameter values and kdims.
    group_kwargs = {}
    if group_type == 'raw':
        group_type = dict
    if issubclass(group_type, Element):
        group_kwargs.update(util.get_param_values(dataset))
        group_kwargs['kdims'] = slice_dims
    group_kwargs.update(kwargs)
    # Flatten the cube to columns when fewer kdims were requested
    # than remain on the slice.
    drop_dim = any(sd not in group_kwargs['kdims'] for sd in slice_dims)

    def make_group(key):
        # Extract the sub-cube for one coordinate combination.
        constraint = iris.Constraint(**dict(zip(names, key)))
        extracted = dataset.data.extract(constraint)
        if drop_dim:
            extracted = group_type(extracted, kdims=slice_dims,
                                   vdims=dataset.vdims).columns()
        return group_type(extracted, **group_kwargs)

    coords = product(*(cls.values(dataset, d, expanded=False)
                       for d in dims))
    data = [(key, make_group(key)) for key in coords]
    if issubclass(container_type, NdMapping):
        # Skip per-item validation and sorting while bulk-building.
        with item_check(False), sorted_context(False):
            return container_type(data, kdims=dims)
    return container_type(data)